query (string, lengths 8 to 6.75k) | document (string, lengths 9 to 1.89M) | negatives (sequence, length 19) | metadata (dict) |
---|---|---|---|
Removes a zone rcode0 API docs: | func (s *ZoneManagementService) Delete(zone string) (*StatusResponse, error) {
resp, err := s.client.NewRequest().
SetPathParams(
map[string]string{
"zone": zone,
}).
Delete(
s.client.BaseURL.String() +
s.client.APIVersion +
RC0Zone,
)
if err != nil {
return nil, err
}
return s.client.ResponseToRC0StatusResponse(resp)
} | [
"func (u *Unbound) ZoneRemove(zoneName string) error {\n\tczoneName := C.CString(zoneName)\n\tdefer C.free(unsafe.Pointer(czoneName))\n\ti := C.ub_ctx_zone_remove(u.ctx, czoneName)\n\treturn newError(int(i))\n}",
"func (c *NegAssertionImpl) RemoveZone(zone string) {\n\tif set, ok := c.zoneMap.Remove(zone); ok {\n\t\tfor _, key := range set.(*safeHashMap.Map).GetAllKeys() {\n\t\t\tv, ok := c.cache.Remove(key)\n\t\t\tif ok {\n\t\t\t\tvalue := v.(*negAssertionCacheValue)\n\t\t\t\tvalue.mux.Lock()\n\t\t\t\tif value.deleted {\n\t\t\t\t\tvalue.mux.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tvalue.deleted = true\n\t\t\t\tc.counter.Sub(len(value.sections))\n\t\t\t\tvalue.mux.Unlock()\n\t\t\t}\n\t\t}\n\t}\n}",
"func (v *DNSView) RemoveZone(info DNSZoneInfo) error {\n\treturn fmt.Errorf(\"zone deletion is implicit\")\n}",
"func (nt *NodeTree) removeZone(zone string) {\n\tdelete(nt.tree, zone)\n\tfor i, z := range nt.zones {\n\t\tif z == zone {\n\t\t\tnt.zones = append(nt.zones[:i], nt.zones[i+1:]...)\n\t\t}\n\t}\n}",
"func (s *DNSService) DeleteZone(ctx context.Context, zoneUUID string) (*http.Response, error) {\n\tpath := fmt.Sprintf(\"api/xdns/2019-05-27/zones/%v\", zoneUUID)\n\tresp, err := s.client.delete(ctx, path)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, nil\n}",
"func (handler *DnsHandler) ZoneDelete(zone string) error {\n\treturn nil\n}",
"func (dm *DataManager) DeleteZone(ctx context.Context, req *pb.DeleteZoneReq) (*pb.DeleteZoneResp, error) {\n\trtime := time.Now()\n\tlogger.V(2).Infof(\"DeleteZone[%d]| input[%+v]\", req.Seq, req)\n\tresponse := &pb.DeleteZoneResp{Seq: req.Seq, ErrCode: pbcommon.ErrCode_E_OK, ErrMsg: \"OK\"}\n\n\tdefer func() {\n\t\tcost := dm.collector.StatRequest(\"DeleteZone\", response.ErrCode, rtime, time.Now())\n\t\tlogger.V(2).Infof(\"DeleteZone[%d]| output[%dms][%+v]\", req.Seq, cost, response)\n\t}()\n\n\taction := zoneaction.NewDeleteAction(dm.viper, dm.smgr, req, response)\n\tdm.executor.Execute(action)\n\n\treturn response, nil\n}",
"func (zs *Zones) Remove(z *Zone) {\n\tzs.Lock()\n\tz.online = false\n\tdelete(zs.zones, z.Name().Key())\n\tzs.Unlock()\n}",
"func (c *assertionCacheImpl) RemoveZone(zone string) {\n\tif set, ok := c.zoneMap.Remove(zone); ok {\n\t\tfor _, key := range set.(*safeHashMap.Map).GetAllKeys() {\n\t\t\tv, ok := c.cache.Remove(key)\n\t\t\tif ok {\n\t\t\t\tvalue := v.(*assertionCacheValue)\n\t\t\t\tvalue.mux.Lock()\n\t\t\t\tif value.deleted {\n\t\t\t\t\tvalue.mux.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tvalue.deleted = true\n\t\t\t\tfor _, val := range value.assertions {\n\t\t\t\t\tc.mux.Lock()\n\t\t\t\t\tc.entriesPerAssertionMap[val.assertion.Hash()]--\n\t\t\t\t\tif c.entriesPerAssertionMap[val.assertion.Hash()] == 0 {\n\t\t\t\t\t\t//FIXME CFE readd consistency checks\n\t\t\t\t\t\t//delete(c.entriesPerAssertionMap, val.assertion.Hash())\n\t\t\t\t\t\t//consistCache.Remove(val.assertion)\n\t\t\t\t\t}\n\t\t\t\t\tc.mux.Unlock()\n\t\t\t\t}\n\t\t\t\tc.counter.Sub(len(value.assertions))\n\t\t\t\tvalue.mux.Unlock()\n\t\t\t}\n\t\t}\n\t}\n}",
"func (d *DebugDNSProvider) DeleteZone(name string) error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"zone\": name,\n\t}).Debug(\"Calling DeleteZone\")\n\n\terr := d.p.DeleteZone(name)\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"zone\": name,\n\t\t\"err\": err,\n\t}).Debug(\"Called DeleteZone\")\n\n\treturn err\n}",
"func (z *Zones) Remove(name string) {\n\tz.Lock()\n\n\tif zo, ok := z.Z[name]; ok {\n\t\tzo.OnShutdown()\n\t}\n\n\tdelete(z.Z, name)\n\n\t// TODO(miek): just regenerate Names (might be bad if you have a lot of zones...)\n\tz.names = []string{}\n\tfor n := range z.Z {\n\t\tz.names = append(z.names, n)\n\t}\n\n\tz.Unlock()\n}",
"func (client ZonesClient) Delete(resourceGroupName string, zoneName string, ifMatch string, ifNoneMatch string, cancel <-chan struct{}) (result autorest.Response, err error) {\n\treq, err := client.DeletePreparer(resourceGroupName, zoneName, ifMatch, ifNoneMatch, cancel)\n\tif err != nil {\n\t\treturn result, autorest.NewErrorWithError(err, \"dns.ZonesClient\", \"Delete\", nil, \"Failure preparing request\")\n\t}\n\n\tresp, err := client.DeleteSender(req)\n\tif err != nil {\n\t\tresult.Response = resp\n\t\treturn result, autorest.NewErrorWithError(err, \"dns.ZonesClient\", \"Delete\", resp, \"Failure sending request\")\n\t}\n\n\tresult, err = client.DeleteResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dns.ZonesClient\", \"Delete\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func ZoneRecordDelete(c eurodnsgo.Client, z Zone, r Record) error {\n\treturn nil\n}",
"func (_DNSResolverContract *DNSResolverContractTransactor) ClearDNSZone(opts *bind.TransactOpts, _node [32]byte) (*types.Transaction, error) {\n\treturn _DNSResolverContract.contract.Transact(opts, \"clearDNSZone\", _node)\n}",
"func (_DNSResolverContract *DNSResolverContractTransactorSession) ClearDNSZone(_node [32]byte) (*types.Transaction, error) {\n\treturn _DNSResolverContract.Contract.ClearDNSZone(&_DNSResolverContract.TransactOpts, _node)\n}",
"func (zone *ZoneFile) Remove(rr *ResourceRecord) (err error) {\n\tif rr.Type == QueryTypeSOA {\n\t\tzone.SOA = nil\n\t} else {\n\t\tif zone.Records.remove(rr) {\n\t\t\terr = zone.Save()\n\t\t}\n\t}\n\treturn err\n}",
"func (u *Unbound) ZoneAdd(zoneName, zoneType string) error {\n\tczoneName := C.CString(zoneName)\n\tdefer C.free(unsafe.Pointer(czoneName))\n\tczoneType := C.CString(zoneType)\n\tdefer C.free(unsafe.Pointer(czoneType))\n\ti := C.ub_ctx_zone_add(u.ctx, czoneName, czoneType)\n\treturn newError(int(i))\n}",
"func (d *Data) DelZones() {\n\n\t// Set the current command:\n\td.command = \"zone:del\"\n\n\t// Create an NS1 API client:\n\thttpClient := &http.Client{Timeout: time.Second * 10}\n\tclient := api.NewClient(httpClient, api.SetAPIKey(d.APIKey))\n\n\t// For each requested zone:\n\tfor _, zone := range d.Zones {\n\n\t\t// Send the delete zone request:\n\t\tif _, err := client.Zones.Delete(zone); err != nil {\n\t\t\tlog.WithFields(log.Fields{\"cmd\": \"ns1:\" + d.command, \"id\": zone}).\n\t\t\t\tFatal(err)\n\t\t}\n\n\t\t// Log zone deletion:\n\t\tlog.WithFields(log.Fields{\"cmd\": \"ns1:\" + d.command, \"id\": zone}).\n\t\t\tInfo(\"DNS zone deleted\")\n\t}\n}",
"func (zone *ZoneFile) Delete() (err error) {\n\treturn os.Remove(zone.Path)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewUpdateRTResponsePoliciesOK creates a UpdateRTResponsePoliciesOK with default headers values | func NewUpdateRTResponsePoliciesOK() *UpdateRTResponsePoliciesOK {
return &UpdateRTResponsePoliciesOK{}
} | [
"func NewGetRTResponsePoliciesDefault(code int) *GetRTResponsePoliciesDefault {\n\treturn &GetRTResponsePoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func AddToExistingPolicies(aReview *v1.AdmissionReview) RuleResponse {\n\tvar rr RuleResponse\n\tvar aPolicy AdmissionPolicy\n\n\taJSON, _ := aReview.Request.Object.MarshalJSON()\n\n\terr := json.Unmarshal(aJSON, &aPolicy)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tfor _, rule := range aPolicy.Spec.Rules {\n\t\tvm := goja.New()\n\t\t_, err = vm.RunString(rule.Rule)\n\t\tif err != nil {\n\t\t\trr.Allowed = false\n\t\t\trr.Message = \"Policy did not pass checks. Check for errors in policy\"\n\t\t\trr.Status = \"Failure\"\n\n\t\t\treturn rr\n\t\t}\n\t}\n\n\tlogger.LogStuff(\"Policy Review Object is:\", aReview)\n\n\tlogger.LogStuff(\"Policy is: \", aPolicy)\n\n\tAdmissionPolicies = append(AdmissionPolicies, aPolicy)\n\n\trr.Allowed = true\n\trr.Message = \"Policy loaded\"\n\trr.Status = \"Success\"\n\n\tPrintPolicies()\n\n\treturn rr\n\n}",
"func newVaultPolicies(c *VaultPolicyV1Client) *vaultpolicies {\n\treturn &vaultpolicies{\n\t\tclient: c.RESTClient(),\n\t}\n}",
"func NewUpdateSecurityPolicyOK() *UpdateSecurityPolicyOK {\n\treturn &UpdateSecurityPolicyOK{}\n}",
"func UA_SetPublishingModeResponse_new() []UA_SetPublishingModeResponse {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[147]))[:]).([]UA_SetPublishingModeResponse)\n}",
"func UA_ModifySubscriptionResponse_new() []UA_ModifySubscriptionResponse {\n\treturn UA_new((*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[118]))[:]).([]UA_ModifySubscriptionResponse)\n}",
"func newPolicies(config *viper.Viper) (PoliciesConnecter, error) {\n\tif config == nil {\n\t\treturn nil, errors.New(\"Must provide config to mf2c.newPolicies()\")\n\t}\n\n\tif config.GetString(isLeaderProp) != \"\" {\n\t\treturn NewPoliciesMock(config.GetBool(isLeaderProp)), nil\n\t}\n\treturn NewPolicies(config.GetString(policiesURLProp))\n}",
"func CreateDescribeBackupPoliciesResponse() (response *DescribeBackupPoliciesResponse) {\n\tresponse = &DescribeBackupPoliciesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (p *PoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (PoliciesCreateOrUpdateResponse, error) {\n\trespType := PoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.CdnWebApplicationFirewallPolicy)\n\tif err != nil {\n\t\treturn PoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (p *BackupShortTermRetentionPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (BackupShortTermRetentionPoliciesCreateOrUpdateResponse, error) {\n\trespType := BackupShortTermRetentionPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.BackupShortTermRetentionPolicy)\n\tif err != nil {\n\t\treturn BackupShortTermRetentionPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (p *LongTermRetentionPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (LongTermRetentionPoliciesCreateOrUpdateResponse, error) {\n\trespType := LongTermRetentionPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.LongTermRetentionPolicy)\n\tif err != nil {\n\t\treturn LongTermRetentionPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (p *ServerBlobAuditingPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (ServerBlobAuditingPoliciesCreateOrUpdateResponse, error) {\n\trespType := ServerBlobAuditingPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.ServerBlobAuditingPolicy)\n\tif err != nil {\n\t\treturn ServerBlobAuditingPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (p *ExtendedServerBlobAuditingPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (ExtendedServerBlobAuditingPoliciesCreateOrUpdateResponse, error) {\n\trespType := ExtendedServerBlobAuditingPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.ExtendedServerBlobAuditingPolicy)\n\tif err != nil {\n\t\treturn ExtendedServerBlobAuditingPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (p *ManagedServerSecurityAlertPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (ManagedServerSecurityAlertPoliciesCreateOrUpdateResponse, error) {\n\trespType := ManagedServerSecurityAlertPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.ManagedServerSecurityAlertPolicy)\n\tif err != nil {\n\t\treturn ManagedServerSecurityAlertPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func NewWindowsUpdatesUpdatePoliciesItemAudienceMicrosoftGraphWindowsUpdatesUpdateAudienceUpdateAudiencePostRequestBody()(*WindowsUpdatesUpdatePoliciesItemAudienceMicrosoftGraphWindowsUpdatesUpdateAudienceUpdateAudiencePostRequestBody) {\n m := &WindowsUpdatesUpdatePoliciesItemAudienceMicrosoftGraphWindowsUpdatesUpdateAudienceUpdateAudiencePostRequestBody{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func mockPolicies() []*po.Schema {\n\treturn []*po.Schema{\n\t\t{\n\t\t\tID: 1,\n\t\t\tName: \"manual_policy\",\n\t\t\tDescription: \"for testing\",\n\t\t\tProjectID: 1,\n\t\t\tProviderID: 1,\n\t\t\tFilters: []*po.Filter{\n\t\t\t\t{\n\t\t\t\t\tType: po.FilterTypeRepository,\n\t\t\t\t\tValue: \"sub/**\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: po.FilterTypeTag,\n\t\t\t\t\tValue: \"prod*\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: po.FilterTypeLabel,\n\t\t\t\t\tValue: \"approved,ready\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTrigger: &po.Trigger{\n\t\t\t\tType: po.TriggerTypeManual,\n\t\t\t},\n\t\t\tEnabled: true,\n\t\t\tCreatedAt: time.Now().UTC(),\n\t\t\tUpdatedTime: time.Now().UTC(),\n\t\t}, {\n\t\t\tID: 2,\n\t\t\tName: \"event_based_policy\",\n\t\t\tDescription: \"for testing\",\n\t\t\tProjectID: 1,\n\t\t\tProviderID: 1,\n\t\t\tFilters: []*po.Filter{\n\t\t\t\t{\n\t\t\t\t\tType: po.FilterTypeRepository,\n\t\t\t\t\tValue: \"busy*\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: po.FilterTypeTag,\n\t\t\t\t\tValue: \"stage*\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: po.FilterTypeLabel,\n\t\t\t\t\tValue: \"staged\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTrigger: &po.Trigger{\n\t\t\t\tType: po.TriggerTypeEventBased,\n\t\t\t},\n\t\t\tEnabled: true,\n\t\t\tCreatedAt: time.Now().UTC(),\n\t\t\tUpdatedTime: time.Now().UTC(),\n\t\t},\n\t}\n}",
"func (p *ManagedBackupShortTermRetentionPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (ManagedBackupShortTermRetentionPoliciesCreateOrUpdateResponse, error) {\n\trespType := ManagedBackupShortTermRetentionPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.ManagedBackupShortTermRetentionPolicy)\n\tif err != nil {\n\t\treturn ManagedBackupShortTermRetentionPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func (p *ManagedInstanceLongTermRetentionPoliciesCreateOrUpdatePoller) FinalResponse(ctx context.Context) (ManagedInstanceLongTermRetentionPoliciesCreateOrUpdateResponse, error) {\n\trespType := ManagedInstanceLongTermRetentionPoliciesCreateOrUpdateResponse{}\n\tresp, err := p.pt.FinalResponse(ctx, &respType.ManagedInstanceLongTermRetentionPolicy)\n\tif err != nil {\n\t\treturn ManagedInstanceLongTermRetentionPoliciesCreateOrUpdateResponse{}, err\n\t}\n\trespType.RawResponse = resp\n\treturn respType, nil\n}",
"func newUpdateEndpointPolicyUpdateEndpointPolicyRequest(ctx context.Context, f *EndpointPolicy, c *Client) (map[string]interface{}, error) {\n\treq := map[string]interface{}{}\n\tres := f\n\t_ = res\n\n\tif v := f.Labels; !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"labels\"] = v\n\t}\n\tif v := f.Type; !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"type\"] = v\n\t}\n\tif v := f.AuthorizationPolicy; !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"authorizationPolicy\"] = v\n\t}\n\tif v, err := expandEndpointPolicyEndpointMatcher(c, f.EndpointMatcher, res); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding EndpointMatcher into endpointMatcher: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"endpointMatcher\"] = v\n\t}\n\tif v, err := expandEndpointPolicyTrafficPortSelector(c, f.TrafficPortSelector, res); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding TrafficPortSelector into trafficPortSelector: %w\", err)\n\t} else if !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"trafficPortSelector\"] = v\n\t}\n\tif v := f.Description; !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"description\"] = v\n\t}\n\tif v := f.ServerTlsPolicy; !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"serverTlsPolicy\"] = v\n\t}\n\tif v := f.ClientTlsPolicy; !dcl.IsEmptyValueIndirect(v) {\n\t\treq[\"clientTlsPolicy\"] = v\n\t}\n\treturn req, nil\n}",
"func (c *Client) SetPolicies(ctx context.Context, idOrAlias string, setRotationPolicy bool, rotationInterval int, setDualAuthDeletePolicy, dualAuthEnable bool, rotationEnable ...bool) ([]Policy, error) {\n\t/*\n\t Setting the value of rotationInterval to -1 in case user passes 0 value as we want to retain the param `interval_month` after marshalling\n\t so that we can get correct error msg from REST API saying interval_month should be between 1 to 12\n\t Otherwise the param would not be sent to REST API in case of value 0 and it would throw error saying interval_month is missing\n\t*/\n\tif rotationInterval == 0 {\n\t\trotationInterval = -1\n\t}\n\tvar enable *bool\n\tif rotationEnable != nil {\n\t\tenable = &rotationEnable[0]\n\t}\n\tpolicies := []Policy{}\n\tif setRotationPolicy {\n\t\trotationPolicy := Policy{\n\t\t\tType: policyType,\n\t\t\tRotation: &Rotation{\n\t\t\t\tEnabled: enable,\n\t\t\t\tInterval: rotationInterval,\n\t\t\t},\n\t\t}\n\t\tpolicies = append(policies, rotationPolicy)\n\t}\n\tif setDualAuthDeletePolicy {\n\t\tdulaAuthPolicy := Policy{\n\t\t\tType: policyType,\n\t\t\tDualAuth: &DualAuth{\n\t\t\t\tEnabled: &dualAuthEnable,\n\t\t\t},\n\t\t}\n\t\tpolicies = append(policies, dulaAuthPolicy)\n\t}\n\n\tpolicyRequest := Policies{\n\t\tMetadata: PoliciesMetadata{\n\t\t\tCollectionType: policyType,\n\t\t\tNumberOfPolicies: len(policies),\n\t\t},\n\t\tPolicies: policies,\n\t}\n\n\tpolicyresponse := Policies{}\n\n\treq, err := c.newRequest(\"PUT\", fmt.Sprintf(\"keys/%s/policies\", idOrAlias), &policyRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = c.do(ctx, req, &policyresponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn policyresponse.Policies, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsRedirect returns true when this update r t response policies o k response has a 3xx status code | func (o *UpdateRTResponsePoliciesOK) IsRedirect() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesNotFound) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *SnapmirrorPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *SoftwareGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *AutosupportModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *FabricGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *FlexcacheOriginGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *FcpServiceModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *GetRunImportanceDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *CreatePresetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *TenancyTenantsReadDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsCode returns true when this update r t response policies o k response a status code equal to that given | func (o *UpdateRTResponsePoliciesOK) IsCode(code int) bool {
return code == 200
} | [
"func (o *UpdateExtensionOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateDetectsByIdsV2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *DryRunPolicyUsingPOST2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *VolumeEfficiencyPolicyModifyOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateConnectionOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateMTOServiceItemOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRegistryEntitiesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetGitWebhookByIDUsingGETOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *SoftwareGetOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetDeploymentRequestsUsingGET2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetEndpointIDOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRulesV1OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *MultiAdminVerifyConfigModifyOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *FlexcacheOriginGetOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *AutosupportModifyOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdatePlanByIDOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *StoragePoolModifyOK) IsCode(code int) bool {\n\treturn code == 200\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the update r t response policies o k response | func (o *UpdateRTResponsePoliciesOK) Code() int {
return 200
} | [
"func (o *UpdateRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesNotFound) Code() int {\n\treturn 404\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRulesV1OK) Code() int {\n\treturn 200\n}",
"func (o *SnapmirrorPolicyModifyDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PatchKvmPoliciesMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *GetRTResponsePoliciesDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateMTOServiceItemOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRulesV1Unauthorized) Code() int {\n\treturn 401\n}",
"func (o *UpdateHTTPSettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateExtensionOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *UpdateModeDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateCSPMPolicySettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewUpdateRTResponsePoliciesBadRequest creates a UpdateRTResponsePoliciesBadRequest with default headers values | func NewUpdateRTResponsePoliciesBadRequest() *UpdateRTResponsePoliciesBadRequest {
return &UpdateRTResponsePoliciesBadRequest{}
} | [
"func (o *UpdateRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func NewGetRTResponsePoliciesDefault(code int) *GetRTResponsePoliciesDefault {\n\treturn &GetRTResponsePoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewUpdateSecurityPolicyBadRequest() *UpdateSecurityPolicyBadRequest {\n\treturn &UpdateSecurityPolicyBadRequest{}\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func NewReplaceHTTPAfterResponseRuleBadRequest() *ReplaceHTTPAfterResponseRuleBadRequest {\n\n\treturn &ReplaceHTTPAfterResponseRuleBadRequest{}\n}",
"func NewCreateHTTPResponseRuleBadRequest() *CreateHTTPResponseRuleBadRequest {\n\n\treturn &CreateHTTPResponseRuleBadRequest{}\n}",
"func NewInitiateVersionControlUpdateBadRequest() *InitiateVersionControlUpdateBadRequest {\n\treturn &InitiateVersionControlUpdateBadRequest{}\n}",
"func NewGetReplicationPoliciesBadRequest() *GetReplicationPoliciesBadRequest {\n\treturn &GetReplicationPoliciesBadRequest{}\n}",
"func NewCreateLocalizedPolicyVersionBadRequest() *CreateLocalizedPolicyVersionBadRequest {\n\treturn &CreateLocalizedPolicyVersionBadRequest{}\n}",
"func NewPutReplicationPoliciesIDBadRequest() *PutReplicationPoliciesIDBadRequest {\n\treturn &PutReplicationPoliciesIDBadRequest{}\n}",
"func NewUpdateInterruptionResponsibilitySpacesBadRequest() *UpdateInterruptionResponsibilitySpacesBadRequest {\n\treturn &UpdateInterruptionResponsibilitySpacesBadRequest{}\n}",
"func NewPostKeysBadRequest() *PostKeysBadRequest {\n\treturn &PostKeysBadRequest{}\n}",
"func NewGetAPIPublicV1TeamTeamPoliciesBadRequest() *GetAPIPublicV1TeamTeamPoliciesBadRequest {\n\treturn &GetAPIPublicV1TeamTeamPoliciesBadRequest{}\n}",
"func NewCreateRootRegionOverrideBadRequest() *CreateRootRegionOverrideBadRequest {\n\treturn &CreateRootRegionOverrideBadRequest{}\n}",
"func NewPostPaymentdefaultsBadRequest() *PostPaymentdefaultsBadRequest {\n\treturn &PostPaymentdefaultsBadRequest{}\n}",
"func NewModifyUpdateBadRequestResponseBody(res *goa.ServiceError) *ModifyUpdateBadRequestResponseBody {\n\tbody := &ModifyUpdateBadRequestResponseBody{\n\t\tName: res.Name,\n\t\tID: res.ID,\n\t\tMessage: res.Message,\n\t\tTemporary: res.Temporary,\n\t\tTimeout: res.Timeout,\n\t\tFault: res.Fault,\n\t}\n\treturn body\n}",
"func NewCreateSecurityPolicyBadRequest() *CreateSecurityPolicyBadRequest {\n\treturn &CreateSecurityPolicyBadRequest{}\n}",
"func NewUpdateTechBadRequest(body *UpdateTechBadRequestResponseBody) *goa.ServiceError {\n\tv := &goa.ServiceError{\n\t\tName: *body.Name,\n\t\tID: *body.ID,\n\t\tMessage: *body.Message,\n\t\tTemporary: *body.Temporary,\n\t\tTimeout: *body.Timeout,\n\t\tFault: *body.Fault,\n\t}\n\n\treturn v\n}",
"func NewSpaceAncestorsBadRequest() *SpaceAncestorsBadRequest {\n\treturn &SpaceAncestorsBadRequest{}\n}",
"func NewUpdateBucketBadRequest() *UpdateBucketBadRequest {\n\treturn &UpdateBucketBadRequest{}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsSuccess returns true when this update r t response policies bad request response has a 2xx status code | func (o *UpdateRTResponsePoliciesBadRequest) IsSuccess() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2Unauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2BadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudIpsecpoliciesPostBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateExtensionBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *AddUpstreamBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *ListEventLoopBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRegistryEntitiesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2InternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *CreateServiceCredentialsUsingPOSTBadRequest) IsSuccess() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsRedirect returns true when this update r t response policies bad request response has a 3xx status code | func (o *UpdateRTResponsePoliciesBadRequest) IsRedirect() bool {
return false
} | [
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetDomainsUsingGETBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *ExtractionGetV1BadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *ExtractionGetV1TooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *SoftwareGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *GetDeviceControlPoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *SnapmirrorPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *UpdateDetectsByIdsV2BadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *AutosupportModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *CreateDeviceControlPoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *ListMTOPaymentRequestsBadRequest) IsRedirect() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsCode returns true when this update r t response policies bad request response a status code equal to that given | func (o *UpdateRTResponsePoliciesBadRequest) IsCode(code int) bool {
return code == 400
} | [
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *UpdateRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *GetDomainsUsingGETBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *DryRunPolicyUsingPOST2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *DryRunPolicyUsingPOST2BadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *BatchGetCmdStatusBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *GetGitWebhookByIDUsingGETInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *CreateDeviceControlPoliciesBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *GetDeploymentRequestsUsingGET2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UndoChangeSubscriptionPlanBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateDetectsByIdsV2BadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsCode(code int) bool {\n\treturn code == 404\n}",
"func (o *RecordUsageBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *UpdateMTOServiceItemBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *ListEventLoopBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the update r t response policies bad request response | func (o *UpdateRTResponsePoliciesBadRequest) Code() int {
return 400
} | [
"func (o *UpdateRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRTResponsePoliciesNotFound) Code() int {\n\treturn 404\n}",
"func (o *UpdateMTOServiceItemBadRequest) Code() int {\n\treturn 400\n}",
"func (o *UpdateExtensionBadRequest) Code() int {\n\treturn 400\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func (o *UpdateRulesV1BadRequest) Code() int {\n\treturn 400\n}",
"func (o *UpdateRulesV1OK) Code() int {\n\treturn 200\n}",
"func (o *UpdateMTOServiceItemOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateHTTPSettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateConnectionForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRulesV1InternalServerError) Code() int {\n\treturn 500\n}",
"func (o *BatchGetCmdStatusBadRequest) Code() int {\n\treturn 400\n}",
"func (o *UpdateRulesV1Unauthorized) Code() int {\n\treturn 401\n}",
"func (o *UpdateExtensionOK) Code() int {\n\treturn 200\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateMTOServiceItemUnprocessableEntity) Code() int {\n\treturn 422\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewUpdateRTResponsePoliciesForbidden creates a UpdateRTResponsePoliciesForbidden with default headers values | func NewUpdateRTResponsePoliciesForbidden() *UpdateRTResponsePoliciesForbidden {
return &UpdateRTResponsePoliciesForbidden{}
} | [
"func NewGetRTResponsePoliciesDefault(code int) *GetRTResponsePoliciesDefault {\n\treturn &GetRTResponsePoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewPostRoutingpoliciesForbidden() *PostRoutingpoliciesForbidden {\n\treturn &PostRoutingpoliciesForbidden{}\n}",
"func (o *UpdateRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func NewForbidden(cause error) Forbidden { return Forbidden(cause.Error()) }",
"func NewPostResourceServersForbidden() *PostResourceServersForbidden {\n\treturn &PostResourceServersForbidden{}\n}",
"func NewGetReplicationPoliciesForbidden() *GetReplicationPoliciesForbidden {\n\treturn &GetReplicationPoliciesForbidden{}\n}",
"func NewInitiateVersionControlUpdateForbidden() *InitiateVersionControlUpdateForbidden {\n\treturn &InitiateVersionControlUpdateForbidden{}\n}",
"func NewPostKeysForbidden() *PostKeysForbidden {\n\treturn &PostKeysForbidden{}\n}",
"func AddToExistingPolicies(aReview *v1.AdmissionReview) RuleResponse {\n\tvar rr RuleResponse\n\tvar aPolicy AdmissionPolicy\n\n\taJSON, _ := aReview.Request.Object.MarshalJSON()\n\n\terr := json.Unmarshal(aJSON, &aPolicy)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tfor _, rule := range aPolicy.Spec.Rules {\n\t\tvm := goja.New()\n\t\t_, err = vm.RunString(rule.Rule)\n\t\tif err != nil {\n\t\t\trr.Allowed = false\n\t\t\trr.Message = \"Policy did not pass checks. Check for errors in policy\"\n\t\t\trr.Status = \"Failure\"\n\n\t\t\treturn rr\n\t\t}\n\t}\n\n\tlogger.LogStuff(\"Policy Review Object is:\", aReview)\n\n\tlogger.LogStuff(\"Policy is: \", aPolicy)\n\n\tAdmissionPolicies = append(AdmissionPolicies, aPolicy)\n\n\trr.Allowed = true\n\trr.Message = \"Policy loaded\"\n\trr.Status = \"Success\"\n\n\tPrintPolicies()\n\n\treturn rr\n\n}",
"func NewUpdateRobotForbidden() *UpdateRobotForbidden {\n\treturn &UpdateRobotForbidden{}\n}",
"func NewUpdateRoutingRuleForbidden() *UpdateRoutingRuleForbidden {\n\treturn &UpdateRoutingRuleForbidden{}\n}",
"func (c *Client) SetPolicies(ctx context.Context, idOrAlias string, setRotationPolicy bool, rotationInterval int, setDualAuthDeletePolicy, dualAuthEnable bool, rotationEnable ...bool) ([]Policy, error) {\n\t/*\n\t Setting the value of rotationInterval to -1 in case user passes 0 value as we want to retain the param `interval_month` after marshalling\n\t so that we can get correct error msg from REST API saying interval_month should be between 1 to 12\n\t Otherwise the param would not be sent to REST API in case of value 0 and it would throw error saying interval_month is missing\n\t*/\n\tif rotationInterval == 0 {\n\t\trotationInterval = -1\n\t}\n\tvar enable *bool\n\tif rotationEnable != nil {\n\t\tenable = &rotationEnable[0]\n\t}\n\tpolicies := []Policy{}\n\tif setRotationPolicy {\n\t\trotationPolicy := Policy{\n\t\t\tType: policyType,\n\t\t\tRotation: &Rotation{\n\t\t\t\tEnabled: enable,\n\t\t\t\tInterval: rotationInterval,\n\t\t\t},\n\t\t}\n\t\tpolicies = append(policies, rotationPolicy)\n\t}\n\tif setDualAuthDeletePolicy {\n\t\tdulaAuthPolicy := Policy{\n\t\t\tType: policyType,\n\t\t\tDualAuth: &DualAuth{\n\t\t\t\tEnabled: &dualAuthEnable,\n\t\t\t},\n\t\t}\n\t\tpolicies = append(policies, dulaAuthPolicy)\n\t}\n\n\tpolicyRequest := Policies{\n\t\tMetadata: PoliciesMetadata{\n\t\t\tCollectionType: policyType,\n\t\t\tNumberOfPolicies: len(policies),\n\t\t},\n\t\tPolicies: policies,\n\t}\n\n\tpolicyresponse := Policies{}\n\n\treq, err := c.newRequest(\"PUT\", fmt.Sprintf(\"keys/%s/policies\", idOrAlias), &policyRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = c.do(ctx, req, &policyresponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn policyresponse.Policies, nil\n}",
"func NewWeaviateSchemaThingsPropertiesUpdateForbidden() *WeaviateSchemaThingsPropertiesUpdateForbidden {\n\n\treturn &WeaviateSchemaThingsPropertiesUpdateForbidden{}\n}",
"func NewModifyUpdateForbiddenResponseBody(res *goa.ServiceError) *ModifyUpdateForbiddenResponseBody {\n\tbody := &ModifyUpdateForbiddenResponseBody{\n\t\tName: res.Name,\n\t\tID: res.ID,\n\t\tMessage: res.Message,\n\t\tTemporary: res.Temporary,\n\t\tTimeout: res.Timeout,\n\t\tFault: res.Fault,\n\t}\n\treturn body\n}",
"func NewPostUpdateSoftwareComponentsForbidden() *PostUpdateSoftwareComponentsForbidden {\n\treturn &PostUpdateSoftwareComponentsForbidden{}\n}",
"func NewPostUpdateSoftwareLicensesForbidden() *PostUpdateSoftwareLicensesForbidden {\n\treturn &PostUpdateSoftwareLicensesForbidden{}\n}",
"func NewUpdateRoutingFileForbidden() *UpdateRoutingFileForbidden {\n\treturn &UpdateRoutingFileForbidden{}\n}",
"func NewUpdateModeForbidden() *UpdateModeForbidden {\n\treturn &UpdateModeForbidden{}\n}",
"func DescribeResourcePolicies(d *Describer, resPolicies *v1.TypedLocalObjectReference) {\n\td.Printf(\"Resource policies:\\n\")\n\td.Printf(\"\\tType:\\t%s\\n\", resPolicies.Kind)\n\td.Printf(\"\\tName:\\t%s\\n\", resPolicies.Name)\n}",
"func NewUpdateRegistryEntitiesForbidden() *UpdateRegistryEntitiesForbidden {\n\treturn &UpdateRegistryEntitiesForbidden{}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsSuccess returns true when this update r t response policies forbidden response has a 2xx status code | func (o *UpdateRTResponsePoliciesForbidden) IsSuccess() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRepository39Forbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetAgreementForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *BatchGetCmdStatusForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateAzureCloudAccountAsyncForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateTeamMemberForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudIpsecpoliciesPostForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateConnectionForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2Forbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetTerraformVersionUsingGETForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudImagesGetForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *ForceReleaseForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *ListMTOPaymentRequestsForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateOrganizationMemberForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *CreateServiceCredentialsUsingPOSTForbidden) IsSuccess() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsRedirect returns true when this update r t response policies forbidden response has a 3xx status code | func (o *UpdateRTResponsePoliciesForbidden) IsRedirect() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *GetTerraformVersionUsingGETForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetAgreementForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *SnapmirrorPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *ExtractionGetV1Forbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetDeviceControlPoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetDomainsUsingGETBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *ExtractionGetV1TooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetAgreementInternalServerError) IsRedirect() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsCode returns true when this update r t response policies forbidden response a status code equal to that given | func (o *UpdateRTResponsePoliciesForbidden) IsCode(code int) bool {
return code == 403
} | [
"func (o *QueryCombinedRTResponsePoliciesForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *GetGitWebhookByIDUsingGETForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *BatchGetCmdStatusForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateConnectionForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *NotifyRunStatusForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateDetectsByIdsV2Forbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *PatchEntitiesAlertsV2Forbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *DryRunPolicyUsingPOST2Forbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *ForceReleaseForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *UpdateRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetTerraformVersionUsingGETForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *GetDomainsUsingGETForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateMTOServiceItemForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *V2UpdateHostIgnitionForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *UpdateRegistryEntitiesForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *DryRunPolicyUsingPOST2OK) IsCode(code int) bool {\n\treturn code == 200\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the update r t response policies forbidden response | func (o *UpdateRTResponsePoliciesForbidden) Code() int {
return 403
} | [
"func (o *UpdateConnectionForbidden) Code() int {\n\treturn 403\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateMTOServiceItemForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRulesV1Forbidden) Code() int {\n\treturn 403\n}",
"func (o *PatchEntitiesAlertsV2Forbidden) Code() int {\n\treturn 403\n}",
"func (o *PatchProjectSettingsForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateTeamMemberForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRegistryEntitiesForbidden) Code() int {\n\treturn 403\n}",
"func (o *BatchGetCmdStatusForbidden) Code() int {\n\treturn 403\n}",
"func (o *NotifyRunStatusForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRoutingRuleForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRepository39Forbidden) Code() int {\n\treturn 403\n}",
"func (o *ForceReleaseForbidden) Code() int {\n\treturn 403\n}",
"func (o *RTRDeletePutFilesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateDetectsByIdsV2Forbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateOrganizationMemberForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *CreateVersionForbidden) Code() int {\n\treturn 403\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewUpdateRTResponsePoliciesNotFound creates a UpdateRTResponsePoliciesNotFound with default headers values | func NewUpdateRTResponsePoliciesNotFound() *UpdateRTResponsePoliciesNotFound {
return &UpdateRTResponsePoliciesNotFound{}
} | [
"func NewGetRTResponsePoliciesDefault(code int) *GetRTResponsePoliciesDefault {\n\treturn &GetRTResponsePoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (o *UpdateRTResponsePoliciesNotFound) Code() int {\n\treturn 404\n}",
"func NewPostRoutingpoliciesNotFound() *PostRoutingpoliciesNotFound {\n\treturn &PostRoutingpoliciesNotFound{}\n}",
"func NewUpdateSecurityPolicyNotFound() *UpdateSecurityPolicyNotFound {\n\treturn &UpdateSecurityPolicyNotFound{}\n}",
"func AddToExistingPolicies(aReview *v1.AdmissionReview) RuleResponse {\n\tvar rr RuleResponse\n\tvar aPolicy AdmissionPolicy\n\n\taJSON, _ := aReview.Request.Object.MarshalJSON()\n\n\terr := json.Unmarshal(aJSON, &aPolicy)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tfor _, rule := range aPolicy.Spec.Rules {\n\t\tvm := goja.New()\n\t\t_, err = vm.RunString(rule.Rule)\n\t\tif err != nil {\n\t\t\trr.Allowed = false\n\t\t\trr.Message = \"Policy did not pass checks. Check for errors in policy\"\n\t\t\trr.Status = \"Failure\"\n\n\t\t\treturn rr\n\t\t}\n\t}\n\n\tlogger.LogStuff(\"Policy Review Object is:\", aReview)\n\n\tlogger.LogStuff(\"Policy is: \", aPolicy)\n\n\tAdmissionPolicies = append(AdmissionPolicies, aPolicy)\n\n\trr.Allowed = true\n\trr.Message = \"Policy loaded\"\n\trr.Status = \"Success\"\n\n\tPrintPolicies()\n\n\treturn rr\n\n}",
"func TestAddRevokePolicyNoFailure(t *testing.T) {\n\t// Configure the server as a seed.\n\ts1Config := getTestConfig(\"a\", true, 5050)\n\ts1 := runServerWithConfig(t, s1Config)\n\tdefer s1.Stop()\n\n\t// Wait to elect self as leader.\n\tgetMetadataLeader(t, 10*time.Second, s1)\n\n\t// Monkey-patch authzEnforcer to be nil\n\ts1.authzEnforcer = nil\n\n\t// Apply Raft log to AddPolicy\n\terr := s1.applyAddPolicy(\"a\", \"b\", \"read\")\n\t// Expect no failure\n\trequire.NoError(t, err)\n\n\t// Apply Raft log to RevokePolicy\n\terr = s1.applyRevokePolicy(\"a\", \"b\", \"read\")\n\t// Expect no failure\n\trequire.NoError(t, err)\n}",
"func NewReplaceHTTPAfterResponseRuleNotFound() *ReplaceHTTPAfterResponseRuleNotFound {\n\n\treturn &ReplaceHTTPAfterResponseRuleNotFound{}\n}",
"func newRetryPolicyWithOptionsNoDefault(opts ...RetryPolicyOption) RetryPolicy {\n\trp := &RetryPolicy{}\n\n\t// then allow changing values\n\tfor _, opt := range opts {\n\t\topt(rp)\n\t}\n\n\tif rp.DeterminePolicyToUse == nil {\n\t\trp.DeterminePolicyToUse = returnSamePolicy\n\t}\n\n\treturn *rp\n}",
"func (ins *Instance) PolicyUpdate(resp *envoy_service_discovery.DiscoveryResponse) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tvar ok bool\n\t\t\tif err, ok = r.(error); !ok {\n\t\t\t\terr = fmt.Errorf(\"NPDS: Panic: %v\", r)\n\t\t\t}\n\t\t}\n\t}()\n\n\tlogrus.Debugf(\"NPDS: Updating policy for version %s\", resp.VersionInfo)\n\n\toldMap := ins.getPolicyMap()\n\tnewMap := newPolicyMap()\n\n\tfor _, any := range resp.Resources {\n\t\tif any.TypeUrl != resp.TypeUrl {\n\t\t\treturn fmt.Errorf(\"NPDS: Mismatching TypeUrls: %s != %s\", any.TypeUrl, resp.TypeUrl)\n\t\t}\n\t\tvar config cilium.NetworkPolicy\n\t\tif err = proto.Unmarshal(any.Value, &config); err != nil {\n\t\t\treturn fmt.Errorf(\"NPDS: Policy unmarshal error: %v\", err)\n\t\t}\n\n\t\tips := config.GetEndpointIps()\n\t\tif len(ips) == 0 {\n\t\t\treturn fmt.Errorf(\"NPDS: Policy has no endpoint_ips\")\n\t\t}\n\t\tfor _, ip := range ips {\n\t\t\tlogrus.Debugf(\"NPDS: Endpoint IP: %s\", ip)\n\t\t}\n\t\t// Locate the old version, if any\n\t\toldPolicy, found := oldMap[ips[0]]\n\t\tif found {\n\t\t\t// Check if the new policy is the same as the old one\n\t\t\tif proto.Equal(&config, oldPolicy.protobuf) {\n\t\t\t\tlogrus.Debugf(\"NPDS: New policy for Endpoint %d is equal to the old one, no need to change\", config.GetEndpointId())\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tnewMap[ip] = oldPolicy\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Validate new config\n\t\tif err = config.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"NPDS: Policy validation error for Endpoint %d: %v\", config.GetEndpointId(), err)\n\t\t}\n\n\t\t// Create new PolicyInstance, may panic. Takes ownership of 'config'.\n\t\tnewPolicy := newPolicyInstance(&config)\n\t\tfor _, ip := range ips {\n\t\t\tnewMap[ip] = newPolicy\n\t\t}\n\t}\n\n\t// Store the new policy map\n\tins.setPolicyMap(newMap)\n\n\tlogrus.Debugf(\"NPDS: Policy Update completed for instance %d: %v\", ins.id, newMap)\n\treturn\n}",
"func resourceVolterraNetworkPolicyViewUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tupdateMeta := &ves_io_schema.ObjectReplaceMetaType{}\n\tupdateSpec := &ves_io_schema_views_network_policy_view.ReplaceSpecType{}\n\tupdateReq := &ves_io_schema_views_network_policy_view.ReplaceRequest{\n\t\tMetadata: updateMeta,\n\t\tSpec: updateSpec,\n\t}\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"egress_rules\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tegressRules := make([]*ves_io_schema_network_policy.NetworkPolicyRuleType, len(sl))\n\t\tupdateSpec.EgressRules = egressRules\n\t\tfor i, set := range sl {\n\t\t\tegressRules[i] = &ves_io_schema_network_policy.NetworkPolicyRuleType{}\n\t\t\tegressRulesMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"action\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tegressRules[i].Action = ves_io_schema_network_policy_rule.NetworkPolicyRuleAction(ves_io_schema_network_policy_rule.NetworkPolicyRuleAction_value[v.(string)])\n\n\t\t\t}\n\n\t\t\tif w, ok := egressRulesMapStrToI[\"keys\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tegressRules[i].Keys = ls\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"label_matcher\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tlabelMatcher := &ves_io_schema.LabelMatcherType{}\n\t\t\t\tegressRules[i].LabelMatcher = labelMatcher\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tlabelMatcherMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := labelMatcherMapStrToI[\"keys\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlabelMatcher.Keys = ls\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"metadata\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tmetadata := &ves_io_schema.MessageMetaType{}\n\t\t\t\tegressRules[i].Metadata = metadata\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tmetadataMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := metadataMapStrToI[\"description\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tmetadata.Description = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := metadataMapStrToI[\"disable\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tmetadata.Disable = w.(bool)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := metadataMapStrToI[\"name\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tmetadata.Name = 
w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\totherEndpointTypeFound := false\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"any\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_Any{}\n\t\t\t\t\totherEndpointInt.Any = &ves_io_schema.Empty{}\n\t\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"inside_endpoints\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_InsideEndpoints{}\n\t\t\t\t\totherEndpointInt.InsideEndpoints = &ves_io_schema.Empty{}\n\t\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"ip_prefix_set\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_IpPrefixSet{}\n\t\t\t\totherEndpointInt.IpPrefixSet = &ves_io_schema.IpPrefixSetRefType{}\n\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"ref\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\trefInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\t\t\t\t\totherEndpointInt.IpPrefixSet.Ref = refInt\n\t\t\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\t\t\trMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\t\t\trefInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\t\t\t\t\trefInt[i].Kind = \"ip_prefix_set\"\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Name = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Namespace = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Tenant = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Uid = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"label_selector\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_LabelSelector{}\n\t\t\t\totherEndpointInt.LabelSelector = &ves_io_schema.LabelSelectorType{}\n\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"expressions\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\totherEndpointInt.LabelSelector.Expressions = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"namespace\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := 
&ves_io_schema_network_policy.NetworkPolicyRuleType_Namespace{}\n\n\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\totherEndpointInt.Namespace = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"outside_endpoints\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_OutsideEndpoints{}\n\t\t\t\t\totherEndpointInt.OutsideEndpoints = &ves_io_schema.Empty{}\n\t\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"prefix_list\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_PrefixList{}\n\t\t\t\totherEndpointInt.PrefixList = &ves_io_schema_views.PrefixStringListType{}\n\t\t\t\tegressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"prefixes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\totherEndpointInt.PrefixList.Prefixes = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif w, ok := egressRulesMapStrToI[\"rule_description\"]; ok && !isIntfNil(w) {\n\t\t\t\tegressRules[i].RuleDescription = w.(string)\n\t\t\t}\n\n\t\t\tif w, ok := egressRulesMapStrToI[\"rule_name\"]; ok && !isIntfNil(w) {\n\t\t\t\tegressRules[i].RuleName = w.(string)\n\t\t\t}\n\n\t\t\ttrafficChoiceTypeFound := false\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"all_tcp_traffic\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_AllTcpTraffic{}\n\t\t\t\t\ttrafficChoiceInt.AllTcpTraffic = &ves_io_schema.Empty{}\n\t\t\t\t\tegressRules[i].TrafficChoice = trafficChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"all_traffic\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_AllTraffic{}\n\t\t\t\t\ttrafficChoiceInt.AllTraffic = &ves_io_schema.Empty{}\n\t\t\t\t\tegressRules[i].TrafficChoice = trafficChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"all_udp_traffic\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_AllUdpTraffic{}\n\t\t\t\t\ttrafficChoiceInt.AllUdpTraffic = &ves_io_schema.Empty{}\n\t\t\t\t\tegressRules[i].TrafficChoice = trafficChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"applications\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_Applications{}\n\t\t\t\ttrafficChoiceInt.Applications = &ves_io_schema_network_policy.ApplicationsType{}\n\t\t\t\tegressRules[i].TrafficChoice = trafficChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := 
set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"applications\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tapplicationsList := []ves_io_schema_network_policy.ApplicationEnumType{}\n\t\t\t\t\t\tfor _, j := range v.([]interface{}) {\n\t\t\t\t\t\t\tapplicationsList = append(applicationsList, ves_io_schema_network_policy.ApplicationEnumType(ves_io_schema_network_policy.ApplicationEnumType_value[j.(string)]))\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttrafficChoiceInt.Applications.Applications = applicationsList\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := egressRulesMapStrToI[\"protocol_port_range\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_ProtocolPortRange{}\n\t\t\t\ttrafficChoiceInt.ProtocolPortRange = &ves_io_schema_network_policy.ProtocolPortType{}\n\t\t\t\tegressRules[i].TrafficChoice = trafficChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"port_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttrafficChoiceInt.ProtocolPortRange.PortRanges = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"protocol\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\ttrafficChoiceInt.ProtocolPortRange.Protocol = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"endpoint\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.(*schema.Set).List()\n\t\tendpoint := &ves_io_schema_network_policy.EndpointChoiceType{}\n\t\tupdateSpec.Endpoint = endpoint\n\t\tfor _, set := range sl {\n\t\t\tendpointMapStrToI := set.(map[string]interface{})\n\n\t\t\tendpointChoiceTypeFound := false\n\n\t\t\tif v, ok := endpointMapStrToI[\"any\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_Any{}\n\t\t\t\t\tendpointChoiceInt.Any = &ves_io_schema.Empty{}\n\t\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := endpointMapStrToI[\"inside_endpoints\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_InsideEndpoints{}\n\t\t\t\t\tendpointChoiceInt.InsideEndpoints = &ves_io_schema.Empty{}\n\t\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := endpointMapStrToI[\"interface\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_Interface{}\n\t\t\t\tendpointChoiceInt.Interface = &ves_io_schema_views.ObjectRefType{}\n\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"name\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tendpointChoiceInt.Interface.Name = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"namespace\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tendpointChoiceInt.Interface.Namespace = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"tenant\"]; ok && !isIntfNil(v) 
{\n\n\t\t\t\t\t\tendpointChoiceInt.Interface.Tenant = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := endpointMapStrToI[\"label_selector\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_LabelSelector{}\n\t\t\t\tendpointChoiceInt.LabelSelector = &ves_io_schema.LabelSelectorType{}\n\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"expressions\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tendpointChoiceInt.LabelSelector.Expressions = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := endpointMapStrToI[\"namespace\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_Namespace{}\n\n\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\n\t\t\t\tendpointChoiceInt.Namespace = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := endpointMapStrToI[\"outside_endpoints\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_OutsideEndpoints{}\n\t\t\t\t\tendpointChoiceInt.OutsideEndpoints = &ves_io_schema.Empty{}\n\t\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := endpointMapStrToI[\"prefix_list\"]; ok && !isIntfNil(v) && !endpointChoiceTypeFound {\n\n\t\t\t\tendpointChoiceTypeFound = true\n\t\t\t\tendpointChoiceInt := &ves_io_schema_network_policy.EndpointChoiceType_PrefixList{}\n\t\t\t\tendpointChoiceInt.PrefixList = &ves_io_schema_views.PrefixStringListType{}\n\t\t\t\tendpoint.EndpointChoice = endpointChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"prefixes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tendpointChoiceInt.PrefixList.Prefixes = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"ingress_rules\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tingressRules := make([]*ves_io_schema_network_policy.NetworkPolicyRuleType, len(sl))\n\t\tupdateSpec.IngressRules = ingressRules\n\t\tfor i, set := range sl {\n\t\t\tingressRules[i] = &ves_io_schema_network_policy.NetworkPolicyRuleType{}\n\t\t\tingressRulesMapStrToI := set.(map[string]interface{})\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"action\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tingressRules[i].Action = ves_io_schema_network_policy_rule.NetworkPolicyRuleAction(ves_io_schema_network_policy_rule.NetworkPolicyRuleAction_value[v.(string)])\n\n\t\t\t}\n\n\t\t\tif w, ok := ingressRulesMapStrToI[\"keys\"]; ok && !isIntfNil(w) {\n\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t}\n\t\t\t\tingressRules[i].Keys = ls\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"label_matcher\"]; ok && 
!isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tlabelMatcher := &ves_io_schema.LabelMatcherType{}\n\t\t\t\tingressRules[i].LabelMatcher = labelMatcher\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tlabelMatcherMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := labelMatcherMapStrToI[\"keys\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tls := make([]string, len(w.([]interface{})))\n\t\t\t\t\t\tfor i, v := range w.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlabelMatcher.Keys = ls\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"metadata\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tmetadata := &ves_io_schema.MessageMetaType{}\n\t\t\t\tingressRules[i].Metadata = metadata\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tmetadataMapStrToI := set.(map[string]interface{})\n\n\t\t\t\t\tif w, ok := metadataMapStrToI[\"description\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tmetadata.Description = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := metadataMapStrToI[\"disable\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tmetadata.Disable = w.(bool)\n\t\t\t\t\t}\n\n\t\t\t\t\tif w, ok := metadataMapStrToI[\"name\"]; ok && !isIntfNil(w) {\n\t\t\t\t\t\tmetadata.Name = w.(string)\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\totherEndpointTypeFound := false\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"any\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_Any{}\n\t\t\t\t\totherEndpointInt.Any = &ves_io_schema.Empty{}\n\t\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"inside_endpoints\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_InsideEndpoints{}\n\t\t\t\t\totherEndpointInt.InsideEndpoints = &ves_io_schema.Empty{}\n\t\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"ip_prefix_set\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_IpPrefixSet{}\n\t\t\t\totherEndpointInt.IpPrefixSet = &ves_io_schema.IpPrefixSetRefType{}\n\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"ref\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tsl := v.([]interface{})\n\t\t\t\t\t\trefInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\t\t\t\t\totherEndpointInt.IpPrefixSet.Ref = refInt\n\t\t\t\t\t\tfor i, ps := range sl {\n\n\t\t\t\t\t\t\trMapToStrVal := ps.(map[string]interface{})\n\t\t\t\t\t\t\trefInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\t\t\t\t\trefInt[i].Kind = \"ip_prefix_set\"\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Name = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Namespace = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Tenant = 
v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif v, ok := rMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\t\t\t\t\trefInt[i].Uid = v.(string)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"label_selector\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_LabelSelector{}\n\t\t\t\totherEndpointInt.LabelSelector = &ves_io_schema.LabelSelectorType{}\n\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"expressions\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\totherEndpointInt.LabelSelector.Expressions = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"namespace\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_Namespace{}\n\n\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\totherEndpointInt.Namespace = v.(string)\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"outside_endpoints\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_OutsideEndpoints{}\n\t\t\t\t\totherEndpointInt.OutsideEndpoints = &ves_io_schema.Empty{}\n\t\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"prefix_list\"]; ok && !isIntfNil(v) && !otherEndpointTypeFound {\n\n\t\t\t\totherEndpointTypeFound = true\n\t\t\t\totherEndpointInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_PrefixList{}\n\t\t\t\totherEndpointInt.PrefixList = &ves_io_schema_views.PrefixStringListType{}\n\t\t\t\tingressRules[i].OtherEndpoint = otherEndpointInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"prefixes\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\totherEndpointInt.PrefixList.Prefixes = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif w, ok := ingressRulesMapStrToI[\"rule_description\"]; ok && !isIntfNil(w) {\n\t\t\t\tingressRules[i].RuleDescription = w.(string)\n\t\t\t}\n\n\t\t\tif w, ok := ingressRulesMapStrToI[\"rule_name\"]; ok && !isIntfNil(w) {\n\t\t\t\tingressRules[i].RuleName = w.(string)\n\t\t\t}\n\n\t\t\ttrafficChoiceTypeFound := false\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"all_tcp_traffic\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_AllTcpTraffic{}\n\t\t\t\t\ttrafficChoiceInt.AllTcpTraffic = &ves_io_schema.Empty{}\n\t\t\t\t\tingressRules[i].TrafficChoice = trafficChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"all_traffic\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound 
{\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_AllTraffic{}\n\t\t\t\t\ttrafficChoiceInt.AllTraffic = &ves_io_schema.Empty{}\n\t\t\t\t\tingressRules[i].TrafficChoice = trafficChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"all_udp_traffic\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\n\t\t\t\tif v.(bool) {\n\t\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_AllUdpTraffic{}\n\t\t\t\t\ttrafficChoiceInt.AllUdpTraffic = &ves_io_schema.Empty{}\n\t\t\t\t\tingressRules[i].TrafficChoice = trafficChoiceInt\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"applications\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_Applications{}\n\t\t\t\ttrafficChoiceInt.Applications = &ves_io_schema_network_policy.ApplicationsType{}\n\t\t\t\tingressRules[i].TrafficChoice = trafficChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"applications\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tapplicationsList := []ves_io_schema_network_policy.ApplicationEnumType{}\n\t\t\t\t\t\tfor _, j := range v.([]interface{}) {\n\t\t\t\t\t\t\tapplicationsList = append(applicationsList, ves_io_schema_network_policy.ApplicationEnumType(ves_io_schema_network_policy.ApplicationEnumType_value[j.(string)]))\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttrafficChoiceInt.Applications.Applications = applicationsList\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tif v, ok := ingressRulesMapStrToI[\"protocol_port_range\"]; ok && !isIntfNil(v) && !trafficChoiceTypeFound {\n\n\t\t\t\ttrafficChoiceTypeFound = true\n\t\t\t\ttrafficChoiceInt := &ves_io_schema_network_policy.NetworkPolicyRuleType_ProtocolPortRange{}\n\t\t\t\ttrafficChoiceInt.ProtocolPortRange = &ves_io_schema_network_policy.ProtocolPortType{}\n\t\t\t\tingressRules[i].TrafficChoice = trafficChoiceInt\n\n\t\t\t\tsl := v.(*schema.Set).List()\n\t\t\t\tfor _, set := range sl {\n\t\t\t\t\tcs := set.(map[string]interface{})\n\n\t\t\t\t\tif v, ok := cs[\"port_ranges\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\tls := make([]string, len(v.([]interface{})))\n\t\t\t\t\t\tfor i, v := range v.([]interface{}) {\n\t\t\t\t\t\t\tls[i] = v.(string)\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttrafficChoiceInt.ProtocolPortRange.PortRanges = ls\n\n\t\t\t\t\t}\n\n\t\t\t\t\tif v, ok := cs[\"protocol\"]; ok && !isIntfNil(v) {\n\n\t\t\t\t\t\ttrafficChoiceInt.ProtocolPortRange.Protocol = v.(string)\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Updating Volterra NetworkPolicyView obj with struct: %+v\", updateReq)\n\n\terr := client.ReplaceObject(context.Background(), ves_io_schema_views_network_policy_view.ObjectType, updateReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error updating NetworkPolicyView: %s\", err)\n\t}\n\n\treturn resourceVolterraNetworkPolicyViewRead(d, meta)\n}",
"func (s *overridePolicyLister) OverridePolicies(namespace string) OverridePolicyNamespaceLister {\n\treturn overridePolicyNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func NewGetSolPoliciesDefault(code int) *GetSolPoliciesDefault {\n\treturn &GetSolPoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewPatchNotFound(name string, id string, message string, temporary bool, timeout bool, fault bool) *goa.ServiceError {\n\tv := &goa.ServiceError{}\n\tv.Name = name\n\tv.ID = id\n\tv.Message = message\n\tv.Temporary = temporary\n\tv.Timeout = timeout\n\tv.Fault = fault\n\n\treturn v\n}",
"func (oc *Controller) initRetryPolicyWithDelete(policy *knet.NetworkPolicy, np *networkPolicy) {\n\toc.retryNetPolLock.Lock()\n\tdefer oc.retryNetPolLock.Unlock()\n\tkey := getPolicyNamespacedName(policy)\n\tif entry, ok := oc.retryNetPolices[key]; ok {\n\t\tentry.timeStamp = time.Now()\n\t\tentry.oldPolicy = policy\n\t\tif entry.np == nil {\n\t\t\tentry.np = np\n\t\t}\n\t} else {\n\t\toc.retryNetPolices[key] = &retryNetPolEntry{nil, policy, np, time.Now(), 1, true}\n\t}\n}",
"func NewPostKeysNotFound() *PostKeysNotFound {\n\treturn &PostKeysNotFound{}\n}",
"func (oc *Controller) initRetryPolicy(policy *knet.NetworkPolicy) {\n\toc.retryNetPolLock.Lock()\n\tdefer oc.retryNetPolLock.Unlock()\n\tkey := getPolicyNamespacedName(policy)\n\tif entry, ok := oc.retryNetPolices[key]; ok {\n\t\tentry.timeStamp = time.Now()\n\t\tentry.newPolicy = policy\n\t} else {\n\t\toc.retryNetPolices[key] = &retryNetPolEntry{policy, nil, nil, time.Now(), 1, true}\n\t}\n}",
"func newVaultPolicies(c *VaultPolicyV1Client) *vaultpolicies {\n\treturn &vaultpolicies{\n\t\tclient: c.RESTClient(),\n\t}\n}",
"func NewGetAPIPublicV1TeamTeamPoliciesNotFound() *GetAPIPublicV1TeamTeamPoliciesNotFound {\n\treturn &GetAPIPublicV1TeamTeamPoliciesNotFound{}\n}",
"func expandDefaultActionFixedResponseStatus(l []interface{}) *types.FixedResponseAction {\n\tlRaw := l[0].(map[string]interface{})\n\n\tfixedResponseAction := &types.FixedResponseAction{}\n\n\tif v, ok := lRaw[\"status_code\"].(int); ok {\n\t\tfixedResponseAction.StatusCode = aws.Int32(int32(v))\n\t}\n\n\treturn fixedResponseAction\n}",
"func (rm *resourceManager) customUpdateEndpointPreChecks(\n\tctx context.Context,\n\tdesired *resource,\n\tlatest *resource,\n\tdelta *ackcompare.Delta,\n) error {\n\tlatestStatus := latest.ko.Status.EndpointStatus\n\tif latestStatus == nil {\n\t\treturn nil\n\t}\n\n\tfailureReason := latest.ko.Status.FailureReason\n\tdesiredEndpointConfig := desired.ko.Spec.EndpointConfigName\n\n\tvar lastEndpointConfigForUpdate *string = nil\n\t// get last endpoint config name used for update from annotations\n\tannotations := desired.ko.ObjectMeta.GetAnnotations()\n\tfor k, v := range annotations {\n\t\tif k == lastEndpointConfigForUpdateAnnotation {\n\t\t\tlastEndpointConfigForUpdate = &v\n\t\t}\n\t}\n\n\t// Case 2 - EndpointStatus == Failed\n\tif *latestStatus == svcsdk.EndpointStatusFailed ||\n\t\t// Case 3 - A previous update to the Endpoint with same endpointConfigName failed\n\t\t// Following checks indicate FailureReason is related to a failed update\n\t\t// Note: Internal service error is an exception for this case\n\t\t// \"Request to service failed\" means update failed because of ISE and can be retried\n\t\t(failureReason != nil && lastEndpointConfigForUpdate != nil &&\n\t\t\t!strings.HasPrefix(*failureReason, FailureReasonInternalServiceErrorPrefix) &&\n\t\t\tdelta.DifferentAt(\"Spec.EndpointConfigName\") &&\n\t\t\t*desiredEndpointConfig == *lastEndpointConfigForUpdate) {\n\t\t// 1. FailureReason alone does mean an update failed it can appear because of other reasons(patching/scaling failed)\n\t\t// 2. *desiredEndpointConfig == *lastEndpointConfigForUpdate only tells us an update was tried with lastEndpointConfigForUpdate\n\t\t// but does not tell us anything if the update was successful or not in the past because it is set if updateEndpoint returns 200 (aync operation).\n\t\t// 3. Now, sdkUpdate can execute because of change in any field in Spec (like tags/deploymentConfig in future)\n\n\t\t// 1 & 2 does not guarantee an update Failed. Hence we need to look at `*latestEndpointConfigName` to determine if the update was unsuccessful\n\t\t// `*desiredEndpointConfig != *latestEndpointConfig` + `*desiredEndpointConfig == *lastEndpointConfigForUpdate`+ `FailureReason != nil` indicate that an update is needed,\n\t\t// has already been tried and failed.\n\t\treturn awserr.New(\"EndpointUpdateError\", fmt.Sprintf(\"unable to update endpoint. check FailureReason. latest EndpointConfigName is %s\", *latest.ko.Spec.EndpointConfigName), nil)\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsSuccess returns true when this update r t response policies not found response has a 2xx status code | func (o *UpdateRTResponsePoliciesNotFound) IsSuccess() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2Unauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2NotFound) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdatePlanByIDUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudIpsecpoliciesPostBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2OK) IsSuccess() bool {\n\treturn true\n}",
"func (o *PcloudIpsecpoliciesPostInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateExtensionUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudIpsecpoliciesPostUnauthorized) IsSuccess() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsRedirect returns true when this update r t response policies not found response has a 3xx status code | func (o *UpdateRTResponsePoliciesNotFound) IsRedirect() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *SnapmirrorPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *SoftwareGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *GetDeploymentRequestsUsingGET2OK) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetDeviceControlPoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2NotFound) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2Unauthorized) IsRedirect() bool {\n\treturn false\n}",
"func (o *AutosupportModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *FabricGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsCode returns true when this update r t response policies not found response has a status code equal to that given | func (o *UpdateRTResponsePoliciesNotFound) IsCode(code int) bool {
return code == 404
} | [
"func (o *UpdateRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsCode(code int) bool {\n\treturn code == 403\n}",
"func (o *DryRunPolicyUsingPOST2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *UpdateDetectsByIdsV2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetDeploymentRequestsUsingGET2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateExtensionOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetEndpointIDOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *VolumeEfficiencyPolicyModifyOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *SoftwareGetOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetGitWebhookByIDUsingGETInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *PcloudIpsecpoliciesPostOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateMTOServiceItemOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetDeploymentRequestsUsingGET2Unauthorized) IsCode(code int) bool {\n\treturn code == 401\n}",
"func (o *UpdateRegistryEntitiesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the update r t response policies not found response | func (o *UpdateRTResponsePoliciesNotFound) Code() int {
return 404
} | [
"func (o *UpdateRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *GetRTResponsePoliciesDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateExtensionNotFound) Code() int {\n\treturn 404\n}",
"func (o *QueryCombinedRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *GetDeviceControlPoliciesNotFound) Code() int {\n\treturn 404\n}",
"func (o *UpdateConnectionNotFound) Code() int {\n\treturn 404\n}",
"func (o *SnapmirrorPolicyModifyDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateMTOServiceItemNotFound) Code() int {\n\treturn 404\n}",
"func (o *UpdateMTOServiceItemOK) Code() int {\n\treturn 200\n}",
"func (o *GetSolPoliciesDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PatchKvmPoliciesMoidDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateConnectionForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRoutingRuleNotFound) Code() int {\n\treturn 404\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
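The four rows above all describe the status-classification methods generated for UpdateRTResponsePoliciesNotFound. A minimal, self-contained sketch of that pattern follows; the apiResponse interface and the notFound mock type are illustrative stand-ins, not part of the generated client in the rows:

package main

import "fmt"

// apiResponse captures the classification methods that each generated
// response type in the rows above implements.
type apiResponse interface {
	IsSuccess() bool      // true only for 2xx responses
	IsRedirect() bool     // true only for 3xx responses
	IsCode(code int) bool // true when the response matches the given code
	Code() int            // the fixed HTTP status code of the response type
}

// notFound mirrors the behaviour shown for UpdateRTResponsePoliciesNotFound:
// a fixed 404 that is neither a success nor a redirect.
type notFound struct{}

func (notFound) IsSuccess() bool      { return false }
func (notFound) IsRedirect() bool     { return false }
func (notFound) IsCode(code int) bool { return code == 404 }
func (notFound) Code() int            { return 404 }

func main() {
	var r apiResponse = notFound{}
	fmt.Println(r.IsSuccess(), r.IsRedirect(), r.IsCode(404), r.Code()) // false false true 404
}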
NewUpdateRTResponsePoliciesTooManyRequests creates a UpdateRTResponsePoliciesTooManyRequests with default headers values | func NewUpdateRTResponsePoliciesTooManyRequests() *UpdateRTResponsePoliciesTooManyRequests {
return &UpdateRTResponsePoliciesTooManyRequests{}
} | [
"func NewGetRTResponsePoliciesDefault(code int) *GetRTResponsePoliciesDefault {\n\treturn &GetRTResponsePoliciesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func NewPostKeysTooManyRequests() *PostKeysTooManyRequests {\n\treturn &PostKeysTooManyRequests{}\n}",
"func AddNewThrottlePolicy(t *testing.T, client *apim.Client, adminUsername, adminPassword, policyType string, doClean bool) map[string]interface{} {\n\tclient.Login(adminUsername, adminPassword)\n\tgeneratedPolicy := client.GenerateSampleThrottlePolicyData(policyType)\n\taddedPolicy := client.AddThrottlePolicy(t, generatedPolicy, adminUsername, adminPassword, policyType, doClean)\n\treturn addedPolicy\n}",
"func NewPostLhvTooManyRequests() *PostLhvTooManyRequests {\n\treturn &PostLhvTooManyRequests{}\n}",
"func (c *Client) setNewLimitIfNeeded(response *resty.Response) error {\n\n\t// Now check to see if the Vulners backend tells us to slow down.\n\tlimitS := response.Header().Get(\"X-Vulners-Ratelimit-Reqlimit\")\n\tcurrRateS := response.Header().Get(\"X-Vulners-Ratelimit-Rate\")\n\tif currRateS != \"\" {\n\t\tcurrRate, err := strconv.ParseFloat(currRateS, 64)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot parse X-Vulners-Ratelimit-Rate header: %s\", currRateS)\n\t\t}\n\t\tlimit, err := strconv.ParseFloat(limitS, 64)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot parse X-Vulners-Ratelimit-Reqlimit header: %s\", limitS)\n\t\t}\n\n\t\tnewLimit := rate.Limit(limit)\n\t\t// Check if we need to slow down and do so if needed, or raise the limit if possible.\n\t\tif currRate > limit {\n\t\t\t// Shoot for 80% of the limit, trying to be nice.\n\t\t\tactualLimit := limit * 80 / 100\n\t\t\t// Now very, very slowly ramp up the amount of requests similar to what\n\t\t\t// the Vulners Python APIs do.\n\t\t\trateDifference := (currRate / (actualLimit / 100)) / 60\n\t\t\tnewLimit = rate.Limit((rateDifference * actualLimit) / 100.0)\n\t\t}\n\t\tfmt.Printf(\"Ok, setting limit %f \\n\", newLimit)\n\t\tc.limiter.SetLimit(newLimit)\n\t}\n\treturn nil\n}",
"func NewUpdateModeTooManyRequests() *UpdateModeTooManyRequests {\n\treturn &UpdateModeTooManyRequests{}\n}",
"func NewUpdateNotificationRuleTooManyRequests() *UpdateNotificationRuleTooManyRequests {\n\treturn &UpdateNotificationRuleTooManyRequests{}\n}",
"func NewPostPaymentdefaultsTooManyRequests() *PostPaymentdefaultsTooManyRequests {\n\treturn &PostPaymentdefaultsTooManyRequests{}\n}",
"func AddToExistingPolicies(aReview *v1.AdmissionReview) RuleResponse {\n\tvar rr RuleResponse\n\tvar aPolicy AdmissionPolicy\n\n\taJSON, _ := aReview.Request.Object.MarshalJSON()\n\n\terr := json.Unmarshal(aJSON, &aPolicy)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tfor _, rule := range aPolicy.Spec.Rules {\n\t\tvm := goja.New()\n\t\t_, err = vm.RunString(rule.Rule)\n\t\tif err != nil {\n\t\t\trr.Allowed = false\n\t\t\trr.Message = \"Policy did not pass checks. Check for errors in policy\"\n\t\t\trr.Status = \"Failure\"\n\n\t\t\treturn rr\n\t\t}\n\t}\n\n\tlogger.LogStuff(\"Policy Review Object is:\", aReview)\n\n\tlogger.LogStuff(\"Policy is: \", aPolicy)\n\n\tAdmissionPolicies = append(AdmissionPolicies, aPolicy)\n\n\trr.Allowed = true\n\trr.Message = \"Policy loaded\"\n\trr.Status = \"Success\"\n\n\tPrintPolicies()\n\n\treturn rr\n\n}",
"func NewUpdateLookmlModelTooManyRequests() *UpdateLookmlModelTooManyRequests {\n\treturn &UpdateLookmlModelTooManyRequests{}\n}",
"func NewPostResourceServersTooManyRequests() *PostResourceServersTooManyRequests {\n\treturn &PostResourceServersTooManyRequests{}\n}",
"func NewUpdateSpaceTooManyRequests() *UpdateSpaceTooManyRequests {\n\treturn &UpdateSpaceTooManyRequests{}\n}",
"func resourceVolterraRateLimiterUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tupdateMeta := &ves_io_schema.ObjectReplaceMetaType{}\n\tupdateSpec := &ves_io_schema_rate_limiter.ReplaceSpecType{}\n\tupdateReq := &ves_io_schema_rate_limiter.ReplaceRequest{\n\t\tMetadata: updateMeta,\n\t\tSpec: updateSpec,\n\t}\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tupdateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tupdateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"limits\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tlimits := make([]*ves_io_schema_rate_limiter.RateLimitValue, len(sl))\n\t\tupdateSpec.Limits = limits\n\t\tfor i, set := range sl {\n\t\t\tlimits[i] = &ves_io_schema_rate_limiter.RateLimitValue{}\n\t\t\tlimitsMapStrToI := set.(map[string]interface{})\n\n\t\t\tif w, ok := limitsMapStrToI[\"burst_multiplier\"]; ok && !isIntfNil(w) {\n\t\t\t\tlimits[i].BurstMultiplier = uint32(w.(int))\n\t\t\t}\n\n\t\t\tif w, ok := limitsMapStrToI[\"total_number\"]; ok && !isIntfNil(w) {\n\t\t\t\tlimits[i].TotalNumber = uint32(w.(int))\n\t\t\t}\n\n\t\t\tif v, ok := limitsMapStrToI[\"unit\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tlimits[i].Unit = ves_io_schema_rate_limiter.RateLimitPeriodUnit(ves_io_schema_rate_limiter.RateLimitPeriodUnit_value[v.(string)])\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif v, ok := d.GetOk(\"user_identification\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tuserIdentificationInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\tupdateSpec.UserIdentification = userIdentificationInt\n\t\tfor i, ps := range sl {\n\n\t\t\tuiMapToStrVal := ps.(map[string]interface{})\n\t\t\tuserIdentificationInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\tuserIdentificationInt[i].Kind = \"user_identification\"\n\n\t\t\tif v, ok := uiMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Name = v.(string)\n\t\t\t}\n\n\t\t\tif v, ok := uiMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Namespace = v.(string)\n\t\t\t}\n\n\t\t\tif v, ok := uiMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Tenant = v.(string)\n\t\t\t}\n\n\t\t\tif v, ok := uiMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Uid = v.(string)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Updating Volterra RateLimiter obj with struct: %+v\", updateReq)\n\n\terr := client.ReplaceObject(context.Background(), ves_io_schema_rate_limiter.ObjectType, updateReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error updating RateLimiter: %s\", err)\n\t}\n\n\treturn resourceVolterraRateLimiterRead(d, meta)\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) Code() int {\n\treturn 429\n}",
"func setRateLimitResponseHeaders(lmt *limiter.Limiter, w http.ResponseWriter, tokensLeft int) {\n\tw.Header().Add(\"RateLimit-Limit\", fmt.Sprintf(\"%d\", int(math.Round(lmt.GetMax()))))\n\tw.Header().Add(\"RateLimit-Reset\", \"1\")\n\tw.Header().Add(\"RateLimit-Remaining\", fmt.Sprintf(\"%d\", tokensLeft))\n}",
"func TooManyRequests(w http.ResponseWriter, message string) {\n\twriteError(w, http.StatusTooManyRequests, message)\n}",
"func NewThrottle(rps int, opts ...Option) (*Throttle, error) {\n\toptions := throttleOptions{burstLimit: 10}\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\tif options.clock == nil {\n\t\toptions.clock = clock.NewReal()\n\t}\n\n\tif rps <= 0 {\n\t\treturn nil, fmt.Errorf(\"rate limiter requests per second must be more than zero\")\n\t}\n\n\tif options.burstLimit < 0 {\n\t\treturn nil, fmt.Errorf(\"rate limiter burst limit must zero or more\")\n\t}\n\n\tthrottle := &Throttle{\n\t\tclock: options.clock,\n\t\trequestInterval: time.Second.Nanoseconds() / int64(rps),\n\t\tmaxSlack: options.burstLimit * time.Second.Nanoseconds() / int64(rps),\n\t}\n\tthrottle.minAllowableTime = atomic.NewInt64(throttle.clock.Now().UnixNano() - throttle.maxSlack)\n\treturn throttle, nil\n}",
"func (app *RobotApp) SetNewLimiter(qps int) {\n\tif qps != int(app.QPS) {\n\t\tapp.QPS = int64(qps)\n\t}\n\tapp.limiter = ratelimit.New(qps)\n}",
"func WithMaxPollingAttempts(maxPollingAttempts int) func(plane *HTTPEventSource) {\n\treturn func(ns *HTTPEventSource) {\n\t\tns.maxAttempts = maxPollingAttempts\n\t}\n}",
"func resourceVolterraRateLimiterCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\n\tcreateMeta := &ves_io_schema.ObjectCreateMetaType{}\n\tcreateSpec := &ves_io_schema_rate_limiter.CreateSpecType{}\n\tcreateReq := &ves_io_schema_rate_limiter.CreateRequest{\n\t\tMetadata: createMeta,\n\t\tSpec: createSpec,\n\t}\n\n\tif v, ok := d.GetOk(\"annotations\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Annotations = ms\n\t}\n\n\tif v, ok := d.GetOk(\"description\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Description =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"disable\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Disable =\n\t\t\tv.(bool)\n\t}\n\n\tif v, ok := d.GetOk(\"labels\"); ok && !isIntfNil(v) {\n\n\t\tms := map[string]string{}\n\n\t\tfor k, v := range v.(map[string]interface{}) {\n\t\t\tval := v.(string)\n\t\t\tms[k] = val\n\t\t}\n\t\tcreateMeta.Labels = ms\n\t}\n\n\tif v, ok := d.GetOk(\"name\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Name =\n\t\t\tv.(string)\n\t}\n\n\tif v, ok := d.GetOk(\"namespace\"); ok && !isIntfNil(v) {\n\t\tcreateMeta.Namespace =\n\t\t\tv.(string)\n\t}\n\n\t//limits\n\tif v, ok := d.GetOk(\"limits\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tlimits := make([]*ves_io_schema_rate_limiter.RateLimitValue, len(sl))\n\t\tcreateSpec.Limits = limits\n\t\tfor i, set := range sl {\n\t\t\tlimits[i] = &ves_io_schema_rate_limiter.RateLimitValue{}\n\t\t\tlimitsMapStrToI := set.(map[string]interface{})\n\n\t\t\tif w, ok := limitsMapStrToI[\"burst_multiplier\"]; ok && !isIntfNil(w) {\n\t\t\t\tlimits[i].BurstMultiplier = uint32(w.(int))\n\t\t\t}\n\n\t\t\tif w, ok := limitsMapStrToI[\"total_number\"]; ok && !isIntfNil(w) {\n\t\t\t\tlimits[i].TotalNumber = uint32(w.(int))\n\t\t\t}\n\n\t\t\tif v, ok := limitsMapStrToI[\"unit\"]; ok && !isIntfNil(v) {\n\n\t\t\t\tlimits[i].Unit = ves_io_schema_rate_limiter.RateLimitPeriodUnit(ves_io_schema_rate_limiter.RateLimitPeriodUnit_value[v.(string)])\n\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\t//user_identification\n\tif v, ok := d.GetOk(\"user_identification\"); ok && !isIntfNil(v) {\n\n\t\tsl := v.([]interface{})\n\t\tuserIdentificationInt := make([]*ves_io_schema.ObjectRefType, len(sl))\n\t\tcreateSpec.UserIdentification = userIdentificationInt\n\t\tfor i, ps := range sl {\n\n\t\t\tuiMapToStrVal := ps.(map[string]interface{})\n\t\t\tuserIdentificationInt[i] = &ves_io_schema.ObjectRefType{}\n\n\t\t\tuserIdentificationInt[i].Kind = \"user_identification\"\n\n\t\t\tif v, ok := uiMapToStrVal[\"name\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Name = v.(string)\n\t\t\t}\n\n\t\t\tif v, ok := uiMapToStrVal[\"namespace\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Namespace = v.(string)\n\t\t\t}\n\n\t\t\tif v, ok := uiMapToStrVal[\"tenant\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Tenant = v.(string)\n\t\t\t}\n\n\t\t\tif v, ok := uiMapToStrVal[\"uid\"]; ok && !isIntfNil(v) {\n\t\t\t\tuserIdentificationInt[i].Uid = v.(string)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating Volterra RateLimiter object with struct: %+v\", createReq)\n\n\tcreateRateLimiterResp, err := client.CreateObject(context.Background(), ves_io_schema_rate_limiter.ObjectType, createReq)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating RateLimiter: %s\", err)\n\t}\n\td.SetId(createRateLimiterResp.GetObjSystemMetadata().GetUid())\n\n\treturn 
resourceVolterraRateLimiterRead(d, meta)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsSuccess returns true when this update r t response policies too many requests response has a 2xx status code | func (o *UpdateRTResponsePoliciesTooManyRequests) IsSuccess() bool {
return false
} | [
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetEndpointIDTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2TooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRegistryEntitiesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2Unauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *CreateAzureSubscriptionTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *ExtractionGetV1TooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *BatchRefreshSessionsTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetIncidentsTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeviceControlPoliciesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetIntelRuleEntitiesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *BatchGetCmdStatusTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *CreateDeviceControlPoliciesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetPolicyContainersTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsRedirect returns true when this update r t response policies too many requests response has a 3xx status code | func (o *UpdateRTResponsePoliciesTooManyRequests) IsRedirect() bool {
return false
} | [
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsRedirect() bool {\n\treturn false\n}",
"func (o *ExtractionGetV1TooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *SnapmirrorPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2TooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *SoftwareGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetEndpointIDTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetRunImportanceDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetIntelRuleEntitiesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *AutosupportModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *TenancyTenantsReadDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *GetDeviceControlPoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsCode returns true when this update r t response policies too many requests response has a status code equal to that given | func (o *UpdateRTResponsePoliciesTooManyRequests) IsCode(code int) bool {
return code == 429
} | [
"func (o *UpdateRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *UpdateDetectsByIdsV2TooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *BatchGetCmdStatusTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *CreateDeviceControlPoliciesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *RTRExecuteActiveResponderCommandTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *UpdateRegistryEntitiesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *GetDeploymentRequestsUsingGET2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetDeviceControlPoliciesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *QueryCombinedRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *BatchRefreshSessionsTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *ExtractionGetV1TooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *GetEndpointIDTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *GetPolicyContainersTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *GetIntelRuleFileTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *GetIntelRuleEntitiesTooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}",
"func (o *PatchEntitiesAlertsV2TooManyRequests) IsCode(code int) bool {\n\treturn code == 429\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the update r t response policies too many requests response | func (o *UpdateRTResponsePoliciesTooManyRequests) Code() int {
return 429
} | [
"func (o *UpdateRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesNotFound) Code() int {\n\treturn 404\n}",
"func (o *UpdateRegistryEntitiesTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *QueryCombinedRTResponsePoliciesTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *UpdateHTTPSettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateDetectsByIdsV2TooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *PatchEntitiesAlertsV2TooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *UpdateMTOServiceItemOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func (o *BatchGetCmdStatusTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *UpdateModeDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateConnectionForbidden) Code() int {\n\treturn 403\n}",
"func (o *RTRListScriptsTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *UpdateExtensionOK) Code() int {\n\treturn 200\n}",
"func (r *Response) StatusCode() int { return r.res.StatusCode }",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateConnectionOK) Code() int {\n\treturn 200\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
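As with the 404 rows, the 429 rows expose their status through Code() and IsCode(429). A hedged sketch of how a caller might use that to back off on rate limiting; the coder interface, the tooManyRequests stand-in, and the retryAfter helper are assumptions for illustration, not part of the generated client:

package main

import (
	"fmt"
	"time"
)

// coder is the minimal surface this sketch needs from a generated response
// type such as UpdateRTResponsePoliciesTooManyRequests.
type coder interface{ Code() int }

// tooManyRequests stands in for the generated 429 response type.
type tooManyRequests struct{}

func (tooManyRequests) Code() int { return 429 }

// retryAfter returns a back-off hint when the response reports HTTP 429,
// and zero for every other status.
func retryAfter(r coder) time.Duration {
	if r.Code() == 429 {
		return 2 * time.Second
	}
	return 0
}

func main() {
	fmt.Println(retryAfter(tooManyRequests{})) // 2s
}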
IsSuccess returns true when this update r t response policies internal server error response has a 2xx status code | func (o *UpdateRTResponsePoliciesInternalServerError) IsSuccess() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateMTOServiceItemUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2InternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudIpsecpoliciesPostInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetGitWebhookByIDUsingGETInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsSuccess() bool {\n\treturn false\n}",
"func (o *FrontPutBinaryInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudImagesGetInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudPvminstancesConsolePutInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsSuccess() bool {\n\treturn false\n}",
"func (o *UpdateExtensionBadRequest) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudPvminstancesConsoleGetInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *PcloudV2VolumesclonePostInternalServerError) IsSuccess() bool {\n\treturn false\n}",
"func (o *GetDeploymentRequestsUsingGET2Unauthorized) IsSuccess() bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsRedirect returns true when this update r t response policies internal server error response has a 3xx status code | func (o *UpdateRTResponsePoliciesInternalServerError) IsRedirect() bool {
return false
} | [
"func (o *UpdateRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *QueryCombinedRTResponsePoliciesBadRequest) IsRedirect() bool {\n\treturn false\n}",
"func (o *SoftwareGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *GetDeviceControlPoliciesInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesOK) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateRTResponsePoliciesForbidden) IsRedirect() bool {\n\treturn false\n}",
"func (o *ExtractionGetV1InternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *VolumeEfficiencyPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *SnapmirrorPolicyModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *PcloudImagesGetInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *GetAgreementInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *UpdateDetectsByIdsV2InternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *PcloudTenantsSshkeysGetInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *FabricGetDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}",
"func (o *UpdateMTOServiceItemInternalServerError) IsRedirect() bool {\n\treturn false\n}",
"func (o *AutosupportModifyDefault) IsRedirect() bool {\n\treturn o._statusCode/100 == 3\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
IsCode returns true when this update r t response policies internal server error response has a status code equal to that given | func (o *UpdateRTResponsePoliciesInternalServerError) IsCode(code int) bool {
return code == 500
} | [
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *UpdateRTResponsePoliciesOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *GetGitWebhookByIDUsingGETInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) IsCode(code int) bool {\n\treturn code == 400\n}",
"func (o *GetDeviceControlPoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *UpdateDetectsByIdsV2InternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *CreateDeviceControlPoliciesInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *BatchGetCmdStatusInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *PostIpamIPFailure) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *PatchPrefilterFailure) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *DryRunPolicyUsingPOST2OK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateExtensionOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *UpdateMTOServiceItemPreconditionFailed) IsCode(code int) bool {\n\treturn code == 412\n}",
"func (o *PcloudPvminstancesConsoleGetInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *PcloudTenantsSshkeysGetInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *UpdateRTResponsePoliciesNotFound) IsCode(code int) bool {\n\treturn code == 404\n}",
"func (o *UpdateMTOServiceItemInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}",
"func (o *GetEndpointIDOK) IsCode(code int) bool {\n\treturn code == 200\n}",
"func (o *PcloudIpsecpoliciesPostInternalServerError) IsCode(code int) bool {\n\treturn code == 500\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the update r t response policies internal server error response | func (o *UpdateRTResponsePoliciesInternalServerError) Code() int {
return 500
} | [
"func (o *UpdateRTResponsePoliciesOK) Code() int {\n\treturn 200\n}",
"func (o *QueryCombinedRTResponsePoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateRulesV1InternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesBadRequest) Code() int {\n\treturn 400\n}",
"func (o *UpdateMTOServiceItemInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesNotFound) Code() int {\n\treturn 404\n}",
"func (r *Response) StatusCode() int { return r.res.StatusCode }",
"func (o *UpdateHTTPSettingsDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateDetectsByIdsV2InternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateRTResponsePoliciesTooManyRequests) Code() int {\n\treturn 429\n}",
"func (o *UpdateMTOServiceItemPreconditionFailed) Code() int {\n\treturn 412\n}",
"func (o *CreateDeviceControlPoliciesInternalServerError) Code() int {\n\treturn 500\n}",
"func (o *UpdateConnectionForbidden) Code() int {\n\treturn 403\n}",
"func (o *UpdateExtensionOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateMTOServiceItemOK) Code() int {\n\treturn 200\n}",
"func (o *UpdateExtensionUnauthorized) Code() int {\n\treturn 401\n}",
"func (o *PatchEntitiesAlertsV2InternalServerError) Code() int {\n\treturn 500\n}",
"func (o *RemoveServerGroupInUpstreamInternalServerError) Code() int {\n\treturn 500\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetBook sets the Book field's value. | func (s *CreateBookOutput) SetBook(v *BookData) *CreateBookOutput {
s.Book = v
return s
} | [
"func (s *UpdateBookOutput) SetBook(v *BookData) *UpdateBookOutput {\n\ts.Book = v\n\treturn s\n}",
"func (ap *Adapter) SetAddrBook(addrBook AddrBook) {\n\tap.addrBook = addrBook\n}",
"func (k Keeper) setOrderBook(ctx sdk.Context, orderBook OrderBook) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz, err := k.cdc.MarshalBinary(orderBook)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstore.Set(orderBook.Key, bz)\n}",
"func (s *ListBooksOutput) SetBooks(v []*BookRef) *ListBooksOutput {\n\ts.Books = v\n\treturn s\n}",
"func PutBook(r *http.Request) (Book, error) {\n\t// get form values\n\tbk := Book{}\n\tbk.Isbn = r.FormValue(\"isbn\")\n\tbk.Title = r.FormValue(\"title\")\n\tbk.Author = r.FormValue(\"author\")\n\tp := r.FormValue(\"price\")\n\n\t// validate form values\n\tif bk.Isbn == \"\" || bk.Title == \"\" || bk.Author == \"\" || p == \"\" {\n\t\treturn bk, errors.New(\"400. BadRequest\")\n\t}\n\n\t// convert form values\n\tfloat, err := strconv.ParseFloat(p, 32)\n\tif err != nil {\n\t\treturn bk, errors.New(\"400. BadRequest\")\n\t}\n\tbk.Price = float32(float)\n\n\t// insert values\n\terr = config.Books.Insert(bk)\n\tif err != nil {\n\t\treturn bk, errors.New(\"500. Internal Server Error\" + err.Error())\n\n\t}\n\treturn bk, nil\n}",
"func (m *Room) SetBookingType(value *BookingType)() {\n err := m.GetBackingStore().Set(\"bookingType\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *RecommendationsMutation) SetRunbook(s string) {\n\tm.runbook = &s\n}",
"func (s *Service) PostBook(book models.Book) (models.Book, error) {\n\tif result := s.DB.Save(&book); result.Error != nil {\n\t\treturn models.Book{}, result.Error\n\t}\n\treturn book, nil\n}",
"func (r *CloudloadingService) UpdateBook(bookscloudloadingresource *BooksCloudloadingResource) *CloudloadingUpdateBookCall {\n\tc := &CloudloadingUpdateBookCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.bookscloudloadingresource = bookscloudloadingresource\n\treturn c\n}",
"func (s *DescribeBookInput) SetBookName(v string) *DescribeBookInput {\n\ts.BookName = &v\n\treturn s\n}",
"func (c controller) RecordBook(b *model.Book) xerror.Xerror {\n\tvar err error\n\tvar querySQL postgres.Query\n\tvar tx postgres.Transaction\n\tvar row postgres.ScanRow\n\n\tif tx, err = c.driverSQL.NewTransaction(); err != nil {\n\t\treturn newInternalErr(err)\n\t}\n\tdefer tx.Rollback()\n\n\t// create book\n\tstr := `INSERT INTO h_book(title, description, genre, publish, owner_id, creation_date) VALUES($1, $2, $3, $4, $5, 'now') RETURNING id, creation_date`\n\tif querySQL, err = postgres.NewQuery(str,\n\t\tb.Title,\n\t\tb.Description,\n\t\tb.Genre,\n\t\tb.Publish,\n\t\tb.Owner,\n\t); err != nil {\n\t\treturn newInternalErr(err)\n\t}\n\tif row, err = tx.WithRow(querySQL); err != nil {\n\t\treturn newInternalErr(err)\n\t}\n\tif err = row.Scan(&b.Identifier, &b.CreationDate); err != nil {\n\t\treturn catchErr(err)\n\t}\n\ttx.Commit()\n\treturn nil\n}",
"func (s *UpdateBookInput) SetBookName(v string) *UpdateBookInput {\n\ts.BookName = &v\n\treturn s\n}",
"func PostBook(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tbook := model.NewBook()\n\terr := json.NewDecoder(r.Body).Decode(&book)\n\tif err != nil {\n\t\tlog.Errorf(\"%v\", err)\n\t\twriteJSONFail(w, 400, \"The Post Body was invalid\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\t// validate that the books attributes are in the appropriate bounds\n\terr = book.Validate()\n\tif err != nil {\n\t\twriteJSONFail(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tlibrary := managers.GetLibrary()\n\tlibrary.AddBook(book)\n\n\twriteJSONSuccess(w, \"\", http.StatusCreated)\n}",
"func (api *CRUD) updateBook() µ.Endpoint {\n\tvar (\n\t\tid string\n\t\tbook Book\n\t)\n\n\treturn µ.PUT(\n\t\tµ.Path(path.Is(\"books\"), path.String(&id)),\n\t\tµ.Header(header.ContentJSON()),\n\t\tµ.Body(&book),\n\t\tµ.FMap(func() error {\n\t\t\tbook.ID = iri.New(id)\n\t\t\tif err := api.db.Update(&book, title.Exists()); err != nil {\n\t\t\t\treturn µ.InternalServerError(err)\n\t\t\t}\n\t\t\treturn µ.Ok().JSON(book)\n\t\t}),\n\t)\n}",
"func (t *TransactionMetadata) SetLazyBooked(lazyBooked bool) (modified bool) {\n\tt.lazyBookedMutex.Lock()\n\tdefer t.lazyBookedMutex.Unlock()\n\n\tif t.lazyBooked == lazyBooked {\n\t\treturn\n\t}\n\n\tt.lazyBooked = lazyBooked\n\tt.SetModified()\n\tmodified = true\n\n\treturn\n}",
"func NewBook(c *fiber.Ctx) {\n\tc.Send(\"All Books\")\n}",
"func (o *GetMarketdataHistoryParams) SetBar(bar *string) {\n\to.Bar = bar\n}",
"func (r *CloudloadingService) DeleteBook(volumeId string) *CloudloadingDeleteBookCall {\n\tc := &CloudloadingDeleteBookCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.urlParams_.Set(\"volumeId\", volumeId)\n\treturn c\n}",
"func (lib *Library) AddBook(title, author, ISBN string)error{\n\trows, err := lib.db.Query(\"select count(*) from BT;\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trows.Next()\n\tvar cnt int\n\terr = rows.Scan(&cnt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcnt = cnt + 1\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BT values('%d','%s');\",cnt, title))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BA values('%d','%s');\",cnt, author))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BI values('%d','%s');\",cnt, ISBN))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BB values('%d','N');\",cnt))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn nil;\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CreateBookWithContext is the same as CreateBook with the addition of the ability to pass a context and additional request options. See CreateBook for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create subcontexts for http.Requests. See for more information on using Contexts. | func (c *BookstoreAPI) CreateBookWithContext(ctx aws.Context, input *CreateBookInput, opts ...request.Option) (*CreateBookOutput, error) {
req, out := c.CreateBookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | [
"func (c *M2) CreateApplicationWithContext(ctx aws.Context, input *CreateApplicationInput, opts ...request.Option) (*CreateApplicationOutput, error) {\n\treq, out := c.CreateApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *IoTFleetHub) CreateApplicationWithContext(ctx aws.Context, input *CreateApplicationInput, opts ...request.Option) (*CreateApplicationOutput, error) {\n\treq, out := c.CreateApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c Client) CreateWithContext(context context.Context, input *CreateRoomInput) (*CreateRoomResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodPost,\n\t\tURI: \"/Rooms\",\n\t\tContentType: client.URLEncoded,\n\t}\n\n\tif input == nil {\n\t\tinput = &CreateRoomInput{}\n\t}\n\n\tresponse := &CreateRoomResponse{}\n\tif err := c.client.Send(context, op, input, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (c *RoboMaker) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) {\n\treq, out := c.CreateFleetRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) CreateRecommenderWithContext(ctx aws.Context, input *CreateRecommenderInput, opts ...request.Option) (*CreateRecommenderOutput, error) {\n\treq, out := c.CreateRecommenderRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) CreateRobotWithContext(ctx aws.Context, input *CreateRobotInput, opts ...request.Option) (*CreateRobotOutput, error) {\n\treq, out := c.CreateRobotRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *WorkLink) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInput, opts ...request.Option) (*CreateFleetOutput, error) {\n\treq, out := c.CreateFleetRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) CreateRobotApplicationWithContext(ctx aws.Context, input *CreateRobotApplicationInput, opts ...request.Option) (*CreateRobotApplicationOutput, error) {\n\treq, out := c.CreateRobotApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *MTurk) CreateHITWithContext(ctx aws.Context, input *CreateHITInput, opts ...request.Option) (*CreateHITOutput, error) {\n\treq, out := c.CreateHITRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Ebs) CreateSnapshotWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.CreateSnapshotRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *DirectConnect) CreateBGPPeerWithContext(ctx aws.Context, input *CreateBGPPeerInput, opts ...request.Option) (*CreateBGPPeerOutput, error) {\n\treq, out := c.CreateBGPPeerRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) CreateSimulationApplicationWithContext(ctx aws.Context, input *CreateSimulationApplicationInput, opts ...request.Option) (*CreateSimulationApplicationOutput, error) {\n\treq, out := c.CreateSimulationApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) CreateEventBusWithContext(ctx aws.Context, input *CreateEventBusInput, opts ...request.Option) (*CreateEventBusOutput, error) {\n\treq, out := c.CreateEventBusRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) CreateEndpointWithContext(ctx aws.Context, input *CreateEndpointInput, opts ...request.Option) (*CreateEndpointOutput, error) {\n\treq, out := c.CreateEndpointRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Synthetics) CreateCanaryWithContext(ctx aws.Context, input *CreateCanaryInput, opts ...request.Option) (*CreateCanaryOutput, error) {\n\treq, out := c.CreateCanaryRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) CreateArchiveWithContext(ctx aws.Context, input *CreateArchiveInput, opts ...request.Option) (*CreateArchiveOutput, error) {\n\treq, out := c.CreateArchiveRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) CreateFlywheelWithContext(ctx aws.Context, input *CreateFlywheelInput, opts ...request.Option) (*CreateFlywheelOutput, error) {\n\treq, out := c.CreateFlywheelRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) CreateEndpointWithContext(ctx aws.Context, input *CreateEndpointInput, opts ...request.Option) (*CreateEndpointOutput, error) {\n\treq, out := c.CreateEndpointRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Ebs) CreateVolumeWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.CreateVolumeRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeleteBookWithContext is the same as DeleteBook with the addition of the ability to pass a context and additional request options. See DeleteBook for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create subcontexts for http.Requests. See for more information on using Contexts. | func (c *BookstoreAPI) DeleteBookWithContext(ctx aws.Context, input *DeleteBookInput, opts ...request.Option) (*DeleteBookOutput, error) {
req, out := c.DeleteBookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | [
"func (c *RoboMaker) DeleteFleetWithContext(ctx aws.Context, input *DeleteFleetInput, opts ...request.Option) (*DeleteFleetOutput, error) {\n\treq, out := c.DeleteFleetRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *WorkLink) DeleteFleetWithContext(ctx aws.Context, input *DeleteFleetInput, opts ...request.Option) (*DeleteFleetOutput, error) {\n\treq, out := c.DeleteFleetRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *CodeStarConnections) DeleteHostWithContext(ctx aws.Context, input *DeleteHostInput, opts ...request.Option) (*DeleteHostOutput, error) {\n\treq, out := c.DeleteHostRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) DeleteDocumentClassifierWithContext(ctx aws.Context, input *DeleteDocumentClassifierInput, opts ...request.Option) (*DeleteDocumentClassifierOutput, error) {\n\treq, out := c.DeleteDocumentClassifierRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *M2) DeleteEnvironmentWithContext(ctx aws.Context, input *DeleteEnvironmentInput, opts ...request.Option) (*DeleteEnvironmentOutput, error) {\n\treq, out := c.DeleteEnvironmentRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Ebs) DeleteVolumeWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.DeleteVolumeRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) DeleteRobotWithContext(ctx aws.Context, input *DeleteRobotInput, opts ...request.Option) (*DeleteRobotOutput, error) {\n\treq, out := c.DeleteRobotRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BookstoreAPI) DescribeBookWithContext(ctx aws.Context, input *DescribeBookInput, opts ...request.Option) (*DescribeBookOutput, error) {\n\treq, out := c.DescribeBookRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *MTurk) DeleteHITWithContext(ctx aws.Context, input *DeleteHITInput, opts ...request.Option) (*DeleteHITOutput, error) {\n\treq, out := c.DeleteHITRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Ebs) DeleteSnapshotWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.DeleteSnapshotRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BackupGateway) DeleteHypervisorWithContext(ctx aws.Context, input *DeleteHypervisorInput, opts ...request.Option) (*DeleteHypervisorOutput, error) {\n\treq, out := c.DeleteHypervisorRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *AutoScaling) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) {\n\treq, out := c.DeleteTagsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DeleteRecommenderWithContext(ctx aws.Context, input *DeleteRecommenderInput, opts ...request.Option) (*DeleteRecommenderOutput, error) {\n\treq, out := c.DeleteRecommenderRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Tagv2) DeleteTagWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.DeleteTagRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) DeleteEndpointWithContext(ctx aws.Context, input *DeleteEndpointInput, opts ...request.Option) (*DeleteEndpointOutput, error) {\n\treq, out := c.DeleteEndpointRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DeleteEventTrackerWithContext(ctx aws.Context, input *DeleteEventTrackerInput, opts ...request.Option) (*DeleteEventTrackerOutput, error) {\n\treq, out := c.DeleteEventTrackerRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DeleteSolutionWithContext(ctx aws.Context, input *DeleteSolutionInput, opts ...request.Option) (*DeleteSolutionOutput, error) {\n\treq, out := c.DeleteSolutionRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Synthetics) DeleteCanaryWithContext(ctx aws.Context, input *DeleteCanaryInput, opts ...request.Option) (*DeleteCanaryOutput, error) {\n\treq, out := c.DeleteCanaryRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func NewDeleteRunbookSnapshotSpacesParamsWithContext(ctx context.Context) *DeleteRunbookSnapshotSpacesParams {\n\tvar ()\n\treturn &DeleteRunbookSnapshotSpacesParams{\n\n\t\tContext: ctx,\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetBooks sets the Books field's value. | func (s *ListBooksOutput) SetBooks(v []*BookRef) *ListBooksOutput {
s.Books = v
return s
} | [
"func Books() []Book {\n\treturn books\n}",
"func (s *UpdateBookOutput) SetBook(v *BookData) *UpdateBookOutput {\n\ts.Book = v\n\treturn s\n}",
"func (ru *RoomamountUpdate) RemoveBooks(b ...*Books) *RoomamountUpdate {\n\tids := make([]int, len(b))\n\tfor i := range b {\n\t\tids[i] = b[i].ID\n\t}\n\treturn ru.RemoveBookIDs(ids...)\n}",
"func (s *CreateBookOutput) SetBook(v *BookData) *CreateBookOutput {\n\ts.Book = v\n\treturn s\n}",
"func (ruo *RoomamountUpdateOne) RemoveBooks(b ...*Books) *RoomamountUpdateOne {\n\tids := make([]int, len(b))\n\tfor i := range b {\n\t\tids[i] = b[i].ID\n\t}\n\treturn ruo.RemoveBookIDs(ids...)\n}",
"func GetBooks(c *fiber.Ctx) {\n\tc.Send(\"All Books\")\n}",
"func (ru *RoomamountUpdate) AddBooks(b ...*Books) *RoomamountUpdate {\n\tids := make([]int, len(b))\n\tfor i := range b {\n\t\tids[i] = b[i].ID\n\t}\n\treturn ru.AddBookIDs(ids...)\n}",
"func (c *RelationSeriesEditorsController) ListBooks(ctx *app.ListBooksRelationSeriesEditorsContext) error {\n\t// RelationSeriesEditorsController_ListBooks: start_implement\n\treturn c.l.ListBooks(ctx, c.fm, ctx, nil, &ctx.EditorID, nil, &ctx.SeriesID)\n\t// RelationSeriesEditorsController_ListBooks: end_implement\n}",
"func (c *RelationEditorsSeriesController) ListBooks(ctx *app.ListBooksRelationEditorsSeriesContext) error {\n\t// RelationEditorsSeriesController_ListBooks: start_implement\n\treturn c.l.ListBooks(ctx, c.fm, ctx, nil, &ctx.EditorID, nil, &ctx.SeriesID)\n\t// RelationEditorsSeriesController_ListBooks: end_implement\n}",
"func (ruo *RoomamountUpdateOne) AddBooks(b ...*Books) *RoomamountUpdateOne {\n\tids := make([]int, len(b))\n\tfor i := range b {\n\t\tids[i] = b[i].ID\n\t}\n\treturn ruo.AddBookIDs(ids...)\n}",
"func GetBooks(w http.ResponseWriter, r *http.Request) {\n\tlibrary := managers.GetLibrary()\n\terr := writeJSONSuccess(w, library.GetBooks(), http.StatusOK)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func GetBooks(w http.ResponseWriter, r *http.Request) {\n\tlog.D(\"Get all list of books\")\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(books)\n\n\t// for debugging\n\tjsondata, err := json.Marshal(books)\n\tif err != nil {\n\t\tlog.E(\"Cannot encode to Json\", err)\n\t}\n\tlog.D(\"%v\", string(jsondata))\n}",
"func (c *AuthorClient) QueryBooks(a *Author) *BookQuery {\n\tquery := &BookQuery{config: c.config}\n\tquery.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {\n\t\tid := a.ID\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(author.Table, author.FieldID, id),\n\t\t\tsqlgraph.To(book.Table, book.FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2M, false, author.BooksTable, author.BooksPrimaryKey...),\n\t\t)\n\t\tfromV = sqlgraph.Neighbors(a.driver.Dialect(), step)\n\t\treturn fromV, nil\n\t}\n\treturn query\n}",
"func (k Keeper) setOrderBook(ctx sdk.Context, orderBook OrderBook) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz, err := k.cdc.MarshalBinary(orderBook)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstore.Set(orderBook.Key, bz)\n}",
"func PostBooks(c *gin.Context) {\n\tvar (\n\t\tbookForm BookForm\n\t\tauthor models.Author\n\t)\n\tc.Bind(&bookForm)\n\tfile, _ := c.FormFile(\"file\")\n\tfilename := strings.Join([]string{bookForm.Name, bookForm.Format}, \".\")\n\tdstname := path.Join(config.Conf.DestPath, filename)\n\tif err := c.SaveUploadedFile(file, dstname); err != nil {\n\t\tc.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tuidInterfae, _ := c.Get(\"uid\")\n\tuid := uidInterfae.(uint)\n\tbook := models.Book{\n\t\tName: bookForm.Name,\n\t\tFile: dstname,\n\t\tUserID: uid,\n\t}\n\tif bookForm.AuthorID != 0 {\n\t\tmodels.DB.Find(&author, bookForm.AuthorID)\n\t\tbook.Authors = []*models.Author{&author}\n\t}\n\tmodels.DB.Create(&book)\n\tsearch.Index.Index(fmt.Sprint(book.ID), search.BookIndex{book.Name})\n\tc.JSON(http.StatusCreated, gin.H{\n\t\t\"id\": book.ID,\n\t})\n}",
"func (r *AddDropletRunBook) RunBooks() []RunBook {\n\treturn r.runBooks\n}",
"func (c Controller) GetBooks(db *sql.DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tvar book models.Book\n\t\tvar books []models.Book\n\t\tbookRepo := bookrepository.BookRepository{}\n\t\t// books = bookRepo.GetBooks(db, book, books)\n\t\tdata := bookRepo.GetBooks(db, book, books)\n\t\tresp := u.Message(true, \"success\")\n\t\tresp[\"data\"] = data\n\t\tu.Respond(w, resp)\n\t}\n}",
"func (m *MockClient) GetBooks(ctx context.Context, i *models.GetBooksInput) ([]models.Book, error) {\n\tret := m.ctrl.Call(m, \"GetBooks\", ctx, i)\n\tret0, _ := ret[0].([]models.Book)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (m *MockLister) ListBooks(ctx context.Context, fm Fmodeler, rCtx booksResponse, collectionID, editorID, printID, seriesID *int) error {\n\tret := m.ctrl.Call(m, \"ListBooks\", ctx, fm, rCtx, collectionID, editorID, printID, seriesID)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ListBooksWithContext is the same as ListBooks with the addition of the ability to pass a context and additional request options. See ListBooks for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create subcontexts for http.Requests. See for more information on using Contexts. | func (c *BookstoreAPI) ListBooksWithContext(ctx aws.Context, input *ListBooksInput, opts ...request.Option) (*ListBooksOutput, error) {
req, out := c.ListBooksRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | [
"func (c *BookstoreAPI) ListBooksPagesWithContext(ctx aws.Context, input *ListBooksInput, fn func(*ListBooksOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListBooksInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListBooksRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListBooksOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *SsmSap) ListApplicationsWithContext(ctx aws.Context, input *ListApplicationsInput, opts ...request.Option) (*ListApplicationsOutput, error) {\n\treq, out := c.ListApplicationsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *IoTFleetHub) ListApplicationsWithContext(ctx aws.Context, input *ListApplicationsInput, opts ...request.Option) (*ListApplicationsOutput, error) {\n\treq, out := c.ListApplicationsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) ListRecommendersWithContext(ctx aws.Context, input *ListRecommendersInput, opts ...request.Option) (*ListRecommendersOutput, error) {\n\treq, out := c.ListRecommendersRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) ListDatasetsWithContext(ctx aws.Context, input *ListDatasetsInput, opts ...request.Option) (*ListDatasetsOutput, error) {\n\treq, out := c.ListDatasetsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (s *Service) ListWithContext(ctx context.Context, pairs ...*types.Pair) (err error) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"github.com/Xuanwo/storage/services/qingstor.service.List\")\n\tdefer span.Finish()\n\n\tpairs = append(pairs, ps.WithContext(ctx))\n\treturn s.List(pairs...)\n}",
"func (c *Comprehend) ListDatasetsWithContext(ctx aws.Context, input *ListDatasetsInput, opts ...request.Option) (*ListDatasetsOutput, error) {\n\treq, out := c.ListDatasetsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Tagv2) ListTagsWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.ListTagsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *GuardDuty) ListCoverageWithContext(ctx aws.Context, input *ListCoverageInput, opts ...request.Option) (*ListCoverageOutput, error) {\n\treq, out := c.ListCoverageRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *GuardDuty) ListInvitationsWithContext(ctx aws.Context, input *ListInvitationsInput, opts ...request.Option) (*ListInvitationsOutput, error) {\n\treq, out := c.ListInvitationsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *GuardDuty) ListMembersWithContext(ctx aws.Context, input *ListMembersInput, opts ...request.Option) (*ListMembersOutput, error) {\n\treq, out := c.ListMembersRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) ListEndpointsWithContext(ctx aws.Context, input *ListEndpointsInput, opts ...request.Option) (*ListEndpointsOutput, error) {\n\treq, out := c.ListEndpointsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (s *Storage) ListWithContext(ctx context.Context, path string, pairs ...*types.Pair) (err error) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"github.com/Xuanwo/storage/services/qingstor.storage.List\")\n\tdefer span.Finish()\n\n\tpairs = append(pairs, ps.WithContext(ctx))\n\treturn s.List(path, pairs...)\n}",
"func (c *GuardDuty) ListFindingsWithContext(ctx aws.Context, input *ListFindingsInput, opts ...request.Option) (*ListFindingsOutput, error) {\n\treq, out := c.ListFindingsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Tagv2) ListResourcesWithContext(ctx aws.Context, input *map[string]interface{}, opts ...request.Option) (*map[string]interface{}, error) {\n\treq, out := c.ListResourcesRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BookstoreAPI) DescribeBookWithContext(ctx aws.Context, input *DescribeBookInput, opts ...request.Option) (*DescribeBookOutput, error) {\n\treq, out := c.DescribeBookRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *SsmSap) ListOperationsWithContext(ctx aws.Context, input *ListOperationsInput, opts ...request.Option) (*ListOperationsOutput, error) {\n\treq, out := c.ListOperationsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) ListSchemasWithContext(ctx aws.Context, input *ListSchemasInput, opts ...request.Option) (*ListSchemasOutput, error) {\n\treq, out := c.ListSchemasRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) ListSolutionsWithContext(ctx aws.Context, input *ListSolutionsInput, opts ...request.Option) (*ListSolutionsOutput, error) {\n\treq, out := c.ListSolutionsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ListBooksPagesWithContext is the same as ListBooksPages except it takes a Context and allows setting request options on the pages. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create subcontexts for http.Requests. See for more information on using Contexts. | func (c *BookstoreAPI) ListBooksPagesWithContext(ctx aws.Context, input *ListBooksInput, fn func(*ListBooksOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *ListBooksInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.ListBooksRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*ListBooksOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
} | [
"func (c *M2) ListApplicationsPagesWithContext(ctx aws.Context, input *ListApplicationsInput, fn func(*ListApplicationsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListApplicationsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListApplicationsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *IoTFleetHub) ListApplicationsPagesWithContext(ctx aws.Context, input *ListApplicationsInput, fn func(*ListApplicationsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListApplicationsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListApplicationsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListApplicationsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *RoboMaker) ListFleetsPagesWithContext(ctx aws.Context, input *ListFleetsInput, fn func(*ListFleetsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListFleetsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListFleetsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListFleetsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *LakeFormation) ListResourcesPagesWithContext(ctx aws.Context, input *ListResourcesInput, fn func(*ListResourcesOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListResourcesInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListResourcesRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tcont := true\n\tfor p.Next() && cont {\n\t\tcont = fn(p.Page().(*ListResourcesOutput), !p.HasNextPage())\n\t}\n\treturn p.Err()\n}",
"func (c *MTurk) ListHITsPagesWithContext(ctx aws.Context, input *ListHITsInput, fn func(*ListHITsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListHITsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListHITsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListHITsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *RoboMaker) ListRobotApplicationsPagesWithContext(ctx aws.Context, input *ListRobotApplicationsInput, fn func(*ListRobotApplicationsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListRobotApplicationsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListRobotApplicationsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListRobotApplicationsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *SsmSap) ListComponentsPagesWithContext(ctx aws.Context, input *ListComponentsInput, fn func(*ListComponentsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListComponentsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListComponentsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListComponentsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *WorkLink) ListFleetsPagesWithContext(ctx aws.Context, input *ListFleetsInput, fn func(*ListFleetsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListFleetsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListFleetsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListFleetsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *MTurk) ListWorkerBlocksPagesWithContext(ctx aws.Context, input *ListWorkerBlocksInput, fn func(*ListWorkerBlocksOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListWorkerBlocksInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListWorkerBlocksRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListWorkerBlocksOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *Comprehend) ListFlywheelsPagesWithContext(ctx aws.Context, input *ListFlywheelsInput, fn func(*ListFlywheelsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListFlywheelsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListFlywheelsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListFlywheelsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *Personalize) ListRecipesPagesWithContext(ctx aws.Context, input *ListRecipesInput, fn func(*ListRecipesOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListRecipesInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListRecipesRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListRecipesOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *Rekognition) ListUsersPagesWithContext(ctx aws.Context, input *ListUsersInput, fn func(*ListUsersOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListUsersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListUsersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *RoboMaker) ListRobotsPagesWithContext(ctx aws.Context, input *ListRobotsInput, fn func(*ListRobotsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListRobotsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListRobotsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListRobotsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (_m *SageMakerAPI) ListContextsPagesWithContext(_a0 context.Context, _a1 *sagemaker.ListContextsInput, _a2 func(*sagemaker.ListContextsOutput, bool) bool, _a3 ...request.Option) error {\n\t_va := make([]interface{}, len(_a3))\n\tfor _i := range _a3 {\n\t\t_va[_i] = _a3[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _a0, _a1, _a2)\n\t_ca = append(_ca, _va...)\n\tret := _m.Called(_ca...)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *sagemaker.ListContextsInput, func(*sagemaker.ListContextsOutput, bool) bool, ...request.Option) error); ok {\n\t\tr0 = rf(_a0, _a1, _a2, _a3...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (c *Personalize) ListSolutionsPagesWithContext(ctx aws.Context, input *ListSolutionsInput, fn func(*ListSolutionsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListSolutionsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListSolutionsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListSolutionsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *RoboMaker) ListWorldsPagesWithContext(ctx aws.Context, input *ListWorldsInput, fn func(*ListWorldsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListWorldsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListWorldsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListWorldsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *EBS) ListSnapshotBlocksPagesWithContext(ctx aws.Context, input *ListSnapshotBlocksInput, fn func(*ListSnapshotBlocksOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListSnapshotBlocksInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListSnapshotBlocksRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListSnapshotBlocksOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (c *SsmSap) ListOperationsPagesWithContext(ctx aws.Context, input *ListOperationsInput, fn func(*ListOperationsOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListOperationsInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListOperationsRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListOperationsOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (m *MockRAMAPI) ListResourcesPagesWithContext(arg0 context.Context, arg1 *ram.ListResourcesInput, arg2 func(*ram.ListResourcesOutput, bool) bool, arg3 ...request.Option) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1, arg2}\n\tfor _, a := range arg3 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"ListResourcesPagesWithContext\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetBookName sets the BookName field's value. | func (s *DescribeBookInput) SetBookName(v string) *DescribeBookInput {
s.BookName = &v
return s
} | [
"func (s *UpdateBookInput) SetBookName(v string) *UpdateBookInput {\n\ts.BookName = &v\n\treturn s\n}",
"func (r *VAdaptor) SetName(n string) {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tr.name = n\n}",
"func (w *workerPool) SetName(name string) {\n\tw.Name = name\n}",
"func (client *Client) SetLoadBalancerName(loadBalancerId string, name string) (err error) {\n\targs := &SetLoadBalancerNameArgs{\n\t\tLoadBalancerId: loadBalancerId,\n\t\tLoadBalancerName: name,\n\t}\n\tresponse := &SetLoadBalancerNameResponse{}\n\terr = client.Invoke(\"SetLoadBalancerName\", args, response)\n\treturn err\n}",
"func (r *Recorder) SetName(name string) { r.name = name }",
"func (m *Driver) SetName(name string) { m.name = name }",
"func (v *VolumeCreate) SetName(Name string) {\n\tv.Name = Name\n}",
"func (o *GetDevicePresentationParams) SetManufacturerName(manufacturerName *string) {\n\to.ManufacturerName = manufacturerName\n}",
"func (recv *Object) SetName(name string) {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\tC.atk_object_set_name((*C.AtkObject)(recv.native), c_name)\n\n\treturn\n}",
"func (m *RoomMutation) SetRoomName(s string) {\n\tm._RoomName = &s\n}",
"func (a *Attachment) SetAuthorName(text string) {\n\ta.pa.AuthorName = text\n}",
"func (pkg *Package) SetName(name string) {}",
"func (ec *ExcelControl) SetSheetName(sheetname string, sheet_no int) {\n\tss := ec.GetSheet(sheet_no)\n\tif ss != nil {\n\t\tss.SetName(sheetname)\n\t}\n}",
"func SetName(name string) {\n\tconfig.Name = name\n\tsave()\n}",
"func (bc *BankdetailCreate) SetBankName(s string) *BankdetailCreate {\n\tbc.mutation.SetBankName(s)\n\treturn bc\n}",
"func (c *DistributionList) SetName(name string) (err error) {\n\tif len(name) > data.SM_DL_NAME_LEN {\n\t\terr = fmt.Errorf(\"Distribution List name exceed limit. (%d > %d)\", len(name), data.SM_DL_NAME_LEN)\n\t} else {\n\t\tc.name = name\n\t}\n\treturn\n}",
"func (a *Attachment) SetAuthorName(authorName string) interface{} {\n\ta.AuthorName = authorName\n\treturn \"\"\n}",
"func (s CurrencySet) SetName(value string) {\n\ts.RecordCollection.Set(models.NewFieldName(\"Name\", \"name\"), value)\n}",
"func (ueu *UserExtendUpdate) SetSystemName(s string) *UserExtendUpdate {\n\tueu.mutation.SetSystemName(s)\n\treturn ueu\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DescribeBookWithContext is the same as DescribeBook with the addition of the ability to pass a context and additional request options. See DescribeBook for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil a panic will occur. In the future the SDK may create subcontexts for http.Requests. See for more information on using Contexts. | func (c *BookstoreAPI) DescribeBookWithContext(ctx aws.Context, input *DescribeBookInput, opts ...request.Option) (*DescribeBookOutput, error) {
req, out := c.DescribeBookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | [
"func (c *Personalize) DescribeRecipeWithContext(ctx aws.Context, input *DescribeRecipeInput, opts ...request.Option) (*DescribeRecipeOutput, error) {\n\treq, out := c.DescribeRecipeRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *AutoScaling) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) {\n\treq, out := c.DescribeTagsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *DirectConnect) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) {\n\treq, out := c.DescribeTagsRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) DescribeFleetWithContext(ctx aws.Context, input *DescribeFleetInput, opts ...request.Option) (*DescribeFleetOutput, error) {\n\treq, out := c.DescribeFleetRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) DescribeEndpointWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.Option) (*DescribeEndpointOutput, error) {\n\treq, out := c.DescribeEndpointRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) DescribeWorldWithContext(ctx aws.Context, input *DescribeWorldInput, opts ...request.Option) (*DescribeWorldOutput, error) {\n\treq, out := c.DescribeWorldRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) DescribeRobotWithContext(ctx aws.Context, input *DescribeRobotInput, opts ...request.Option) (*DescribeRobotOutput, error) {\n\treq, out := c.DescribeRobotRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) DescribeEndpointWithContext(ctx aws.Context, input *DescribeEndpointInput, opts ...request.Option) (*DescribeEndpointOutput, error) {\n\treq, out := c.DescribeEndpointRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DescribeRecommenderWithContext(ctx aws.Context, input *DescribeRecommenderInput, opts ...request.Option) (*DescribeRecommenderOutput, error) {\n\treq, out := c.DescribeRecommenderRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BookstoreAPI) UpdateBookWithContext(ctx aws.Context, input *UpdateBookInput, opts ...request.Option) (*UpdateBookOutput, error) {\n\treq, out := c.UpdateBookRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DescribeEventTrackerWithContext(ctx aws.Context, input *DescribeEventTrackerInput, opts ...request.Option) (*DescribeEventTrackerOutput, error) {\n\treq, out := c.DescribeEventTrackerRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) DescribeEventSourceWithContext(ctx aws.Context, input *DescribeEventSourceInput, opts ...request.Option) (*DescribeEventSourceOutput, error) {\n\treq, out := c.DescribeEventSourceRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BookstoreAPI) ListBooksWithContext(ctx aws.Context, input *ListBooksInput, opts ...request.Option) (*ListBooksOutput, error) {\n\treq, out := c.ListBooksRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *AutoScaling) DescribeLoadBalancersWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.Option) (*DescribeLoadBalancersOutput, error) {\n\treq, out := c.DescribeLoadBalancersRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DescribeSchemaWithContext(ctx aws.Context, input *DescribeSchemaInput, opts ...request.Option) (*DescribeSchemaOutput, error) {\n\treq, out := c.DescribeSchemaRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) DescribeSolutionWithContext(ctx aws.Context, input *DescribeSolutionInput, opts ...request.Option) (*DescribeSolutionOutput, error) {\n\treq, out := c.DescribeSolutionRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) DescribeEventBusWithContext(ctx aws.Context, input *DescribeEventBusInput, opts ...request.Option) (*DescribeEventBusOutput, error) {\n\treq, out := c.DescribeEventBusRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BookstoreAPI) DeleteBookWithContext(ctx aws.Context, input *DeleteBookInput, opts ...request.Option) (*DeleteBookOutput, error) {\n\treq, out := c.DeleteBookRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) DescribeDocumentClassifierWithContext(ctx aws.Context, input *DescribeDocumentClassifierInput, opts ...request.Option) (*DescribeDocumentClassifierOutput, error) {\n\treq, out := c.DescribeDocumentClassifierRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetBookName sets the BookName field's value. | func (s *UpdateBookInput) SetBookName(v string) *UpdateBookInput {
s.BookName = &v
return s
} | [
"func (s *DescribeBookInput) SetBookName(v string) *DescribeBookInput {\n\ts.BookName = &v\n\treturn s\n}",
"func (r *VAdaptor) SetName(n string) {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\tr.name = n\n}",
"func (w *workerPool) SetName(name string) {\n\tw.Name = name\n}",
"func (client *Client) SetLoadBalancerName(loadBalancerId string, name string) (err error) {\n\targs := &SetLoadBalancerNameArgs{\n\t\tLoadBalancerId: loadBalancerId,\n\t\tLoadBalancerName: name,\n\t}\n\tresponse := &SetLoadBalancerNameResponse{}\n\terr = client.Invoke(\"SetLoadBalancerName\", args, response)\n\treturn err\n}",
"func (r *Recorder) SetName(name string) { r.name = name }",
"func (m *Driver) SetName(name string) { m.name = name }",
"func (v *VolumeCreate) SetName(Name string) {\n\tv.Name = Name\n}",
"func (o *GetDevicePresentationParams) SetManufacturerName(manufacturerName *string) {\n\to.ManufacturerName = manufacturerName\n}",
"func (recv *Object) SetName(name string) {\n\tc_name := C.CString(name)\n\tdefer C.free(unsafe.Pointer(c_name))\n\n\tC.atk_object_set_name((*C.AtkObject)(recv.native), c_name)\n\n\treturn\n}",
"func (m *RoomMutation) SetRoomName(s string) {\n\tm._RoomName = &s\n}",
"func (a *Attachment) SetAuthorName(text string) {\n\ta.pa.AuthorName = text\n}",
"func (pkg *Package) SetName(name string) {}",
"func (ec *ExcelControl) SetSheetName(sheetname string, sheet_no int) {\n\tss := ec.GetSheet(sheet_no)\n\tif ss != nil {\n\t\tss.SetName(sheetname)\n\t}\n}",
"func SetName(name string) {\n\tconfig.Name = name\n\tsave()\n}",
"func (bc *BankdetailCreate) SetBankName(s string) *BankdetailCreate {\n\tbc.mutation.SetBankName(s)\n\treturn bc\n}",
"func (c *DistributionList) SetName(name string) (err error) {\n\tif len(name) > data.SM_DL_NAME_LEN {\n\t\terr = fmt.Errorf(\"Distribution List name exceed limit. (%d > %d)\", len(name), data.SM_DL_NAME_LEN)\n\t} else {\n\t\tc.name = name\n\t}\n\treturn\n}",
"func (a *Attachment) SetAuthorName(authorName string) interface{} {\n\ta.AuthorName = authorName\n\treturn \"\"\n}",
"func (s CurrencySet) SetName(value string) {\n\ts.RecordCollection.Set(models.NewFieldName(\"Name\", \"name\"), value)\n}",
"func (ueu *UserExtendUpdate) SetSystemName(s string) *UserExtendUpdate {\n\tueu.mutation.SetSystemName(s)\n\treturn ueu\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SetBook sets the Book field's value. | func (s *UpdateBookOutput) SetBook(v *BookData) *UpdateBookOutput {
s.Book = v
return s
} | [
"func (s *CreateBookOutput) SetBook(v *BookData) *CreateBookOutput {\n\ts.Book = v\n\treturn s\n}",
"func (ap *Adapter) SetAddrBook(addrBook AddrBook) {\n\tap.addrBook = addrBook\n}",
"func (k Keeper) setOrderBook(ctx sdk.Context, orderBook OrderBook) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz, err := k.cdc.MarshalBinary(orderBook)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tstore.Set(orderBook.Key, bz)\n}",
"func (s *ListBooksOutput) SetBooks(v []*BookRef) *ListBooksOutput {\n\ts.Books = v\n\treturn s\n}",
"func PutBook(r *http.Request) (Book, error) {\n\t// get form values\n\tbk := Book{}\n\tbk.Isbn = r.FormValue(\"isbn\")\n\tbk.Title = r.FormValue(\"title\")\n\tbk.Author = r.FormValue(\"author\")\n\tp := r.FormValue(\"price\")\n\n\t// validate form values\n\tif bk.Isbn == \"\" || bk.Title == \"\" || bk.Author == \"\" || p == \"\" {\n\t\treturn bk, errors.New(\"400. BadRequest\")\n\t}\n\n\t// convert form values\n\tfloat, err := strconv.ParseFloat(p, 32)\n\tif err != nil {\n\t\treturn bk, errors.New(\"400. BadRequest\")\n\t}\n\tbk.Price = float32(float)\n\n\t// insert values\n\terr = config.Books.Insert(bk)\n\tif err != nil {\n\t\treturn bk, errors.New(\"500. Internal Server Error\" + err.Error())\n\n\t}\n\treturn bk, nil\n}",
"func (m *Room) SetBookingType(value *BookingType)() {\n err := m.GetBackingStore().Set(\"bookingType\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *RecommendationsMutation) SetRunbook(s string) {\n\tm.runbook = &s\n}",
"func (s *Service) PostBook(book models.Book) (models.Book, error) {\n\tif result := s.DB.Save(&book); result.Error != nil {\n\t\treturn models.Book{}, result.Error\n\t}\n\treturn book, nil\n}",
"func (r *CloudloadingService) UpdateBook(bookscloudloadingresource *BooksCloudloadingResource) *CloudloadingUpdateBookCall {\n\tc := &CloudloadingUpdateBookCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.bookscloudloadingresource = bookscloudloadingresource\n\treturn c\n}",
"func (s *DescribeBookInput) SetBookName(v string) *DescribeBookInput {\n\ts.BookName = &v\n\treturn s\n}",
"func (c controller) RecordBook(b *model.Book) xerror.Xerror {\n\tvar err error\n\tvar querySQL postgres.Query\n\tvar tx postgres.Transaction\n\tvar row postgres.ScanRow\n\n\tif tx, err = c.driverSQL.NewTransaction(); err != nil {\n\t\treturn newInternalErr(err)\n\t}\n\tdefer tx.Rollback()\n\n\t// create book\n\tstr := `INSERT INTO h_book(title, description, genre, publish, owner_id, creation_date) VALUES($1, $2, $3, $4, $5, 'now') RETURNING id, creation_date`\n\tif querySQL, err = postgres.NewQuery(str,\n\t\tb.Title,\n\t\tb.Description,\n\t\tb.Genre,\n\t\tb.Publish,\n\t\tb.Owner,\n\t); err != nil {\n\t\treturn newInternalErr(err)\n\t}\n\tif row, err = tx.WithRow(querySQL); err != nil {\n\t\treturn newInternalErr(err)\n\t}\n\tif err = row.Scan(&b.Identifier, &b.CreationDate); err != nil {\n\t\treturn catchErr(err)\n\t}\n\ttx.Commit()\n\treturn nil\n}",
"func (s *UpdateBookInput) SetBookName(v string) *UpdateBookInput {\n\ts.BookName = &v\n\treturn s\n}",
"func PostBook(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tbook := model.NewBook()\n\terr := json.NewDecoder(r.Body).Decode(&book)\n\tif err != nil {\n\t\tlog.Errorf(\"%v\", err)\n\t\twriteJSONFail(w, 400, \"The Post Body was invalid\")\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\n\t// validate that the books attributes are in the appropriate bounds\n\terr = book.Validate()\n\tif err != nil {\n\t\twriteJSONFail(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tlibrary := managers.GetLibrary()\n\tlibrary.AddBook(book)\n\n\twriteJSONSuccess(w, \"\", http.StatusCreated)\n}",
"func (api *CRUD) updateBook() µ.Endpoint {\n\tvar (\n\t\tid string\n\t\tbook Book\n\t)\n\n\treturn µ.PUT(\n\t\tµ.Path(path.Is(\"books\"), path.String(&id)),\n\t\tµ.Header(header.ContentJSON()),\n\t\tµ.Body(&book),\n\t\tµ.FMap(func() error {\n\t\t\tbook.ID = iri.New(id)\n\t\t\tif err := api.db.Update(&book, title.Exists()); err != nil {\n\t\t\t\treturn µ.InternalServerError(err)\n\t\t\t}\n\t\t\treturn µ.Ok().JSON(book)\n\t\t}),\n\t)\n}",
"func (t *TransactionMetadata) SetLazyBooked(lazyBooked bool) (modified bool) {\n\tt.lazyBookedMutex.Lock()\n\tdefer t.lazyBookedMutex.Unlock()\n\n\tif t.lazyBooked == lazyBooked {\n\t\treturn\n\t}\n\n\tt.lazyBooked = lazyBooked\n\tt.SetModified()\n\tmodified = true\n\n\treturn\n}",
"func NewBook(c *fiber.Ctx) {\n\tc.Send(\"All Books\")\n}",
"func (o *GetMarketdataHistoryParams) SetBar(bar *string) {\n\to.Bar = bar\n}",
"func (r *CloudloadingService) DeleteBook(volumeId string) *CloudloadingDeleteBookCall {\n\tc := &CloudloadingDeleteBookCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.urlParams_.Set(\"volumeId\", volumeId)\n\treturn c\n}",
"func (lib *Library) AddBook(title, author, ISBN string)error{\n\trows, err := lib.db.Query(\"select count(*) from BT;\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trows.Next()\n\tvar cnt int\n\terr = rows.Scan(&cnt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcnt = cnt + 1\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BT values('%d','%s');\",cnt, title))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BA values('%d','%s');\",cnt, author))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BI values('%d','%s');\",cnt, ISBN))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, err = lib.db.Exec(fmt.Sprintf(\"insert into BB values('%d','N');\",cnt))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn nil;\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
UpdateBookWithContext is the same as UpdateBook with the addition of the ability to pass a context and additional request options. See UpdateBook for details on how to use this API operation. The context must be non-nil and will be used for request cancellation. If the context is nil, a panic will occur. In the future the SDK may create subcontexts for http.Requests. See for more information on using Contexts. | func (c *BookstoreAPI) UpdateBookWithContext(ctx aws.Context, input *UpdateBookInput, opts ...request.Option) (*UpdateBookOutput, error) {
req, out := c.UpdateBookRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | [
"func (c *M2) UpdateApplicationWithContext(ctx aws.Context, input *UpdateApplicationInput, opts ...request.Option) (*UpdateApplicationOutput, error) {\n\treq, out := c.UpdateApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Personalize) UpdateRecommenderWithContext(ctx aws.Context, input *UpdateRecommenderInput, opts ...request.Option) (*UpdateRecommenderOutput, error) {\n\treq, out := c.UpdateRecommenderRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *CodeStarConnections) UpdateHostWithContext(ctx aws.Context, input *UpdateHostInput, opts ...request.Option) (*UpdateHostOutput, error) {\n\treq, out := c.UpdateHostRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Finspace) UpdateKxDatabaseWithContext(ctx aws.Context, input *UpdateKxDatabaseInput, opts ...request.Option) (*UpdateKxDatabaseOutput, error) {\n\treq, out := c.UpdateKxDatabaseRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *DirectConnect) UpdateLagWithContext(ctx aws.Context, input *UpdateLagInput, opts ...request.Option) (*Lag, error) {\n\treq, out := c.UpdateLagRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *BackupGateway) UpdateHypervisorWithContext(ctx aws.Context, input *UpdateHypervisorInput, opts ...request.Option) (*UpdateHypervisorOutput, error) {\n\treq, out := c.UpdateHypervisorRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (r *Readarr) UpdateBookContext(ctx context.Context, bookID int64, book *Book, moveFiles bool) error {\n\tvar body bytes.Buffer\n\tif err := json.NewEncoder(&body).Encode(book); err != nil {\n\t\treturn fmt.Errorf(\"json.Marshal(%s): %w\", bpBook, err)\n\t}\n\n\treq := starr.Request{\n\t\tURI: path.Join(bpBook, fmt.Sprint(bookID)),\n\t\tQuery: make(url.Values),\n\t\tBody: &body,\n\t}\n\treq.Query.Add(\"moveFiles\", fmt.Sprint(moveFiles))\n\n\tvar output interface{} // do not know what this looks like.\n\n\tif err := r.PutInto(ctx, req, &output); err != nil {\n\t\treturn fmt.Errorf(\"api.Put(%s): %w\", &req, err)\n\t}\n\n\treturn nil\n}",
"func (c *IoTFleetHub) UpdateApplicationWithContext(ctx aws.Context, input *UpdateApplicationInput, opts ...request.Option) (*UpdateApplicationOutput, error) {\n\treq, out := c.UpdateApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) UpdateFlywheelWithContext(ctx aws.Context, input *UpdateFlywheelInput, opts ...request.Option) (*UpdateFlywheelOutput, error) {\n\treq, out := c.UpdateFlywheelRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Synthetics) UpdateCanaryWithContext(ctx aws.Context, input *UpdateCanaryInput, opts ...request.Option) (*UpdateCanaryOutput, error) {\n\treq, out := c.UpdateCanaryRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) UpdateRobotApplicationWithContext(ctx aws.Context, input *UpdateRobotApplicationInput, opts ...request.Option) (*UpdateRobotApplicationOutput, error) {\n\treq, out := c.UpdateRobotApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *EventBridge) UpdateArchiveWithContext(ctx aws.Context, input *UpdateArchiveInput, opts ...request.Option) (*UpdateArchiveOutput, error) {\n\treq, out := c.UpdateArchiveRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Comprehend) UpdateEndpointWithContext(ctx aws.Context, input *UpdateEndpointInput, opts ...request.Option) (*UpdateEndpointOutput, error) {\n\treq, out := c.UpdateEndpointRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *RoboMaker) UpdateSimulationApplicationWithContext(ctx aws.Context, input *UpdateSimulationApplicationInput, opts ...request.Option) (*UpdateSimulationApplicationOutput, error) {\n\treq, out := c.UpdateSimulationApplicationRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *Client) UpdateAddonWithContext(ctx context.Context, id string, a Addon) (*Addon, error) {\n\td := map[string]Addon{\n\t\t\"addon\": a,\n\t}\n\n\tresp, err := c.put(ctx, \"/addons/\"+id, d, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn getAddonFromResponse(c, resp)\n}",
"func (c *BookstoreAPI) DescribeBookWithContext(ctx aws.Context, input *DescribeBookInput, opts ...request.Option) (*DescribeBookOutput, error) {\n\treq, out := c.DescribeBookRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *GuardDuty) UpdateFilterWithContext(ctx aws.Context, input *UpdateFilterInput, opts ...request.Option) (*UpdateFilterOutput, error) {\n\treq, out := c.UpdateFilterRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *GuardDuty) UpdateDetectorWithContext(ctx aws.Context, input *UpdateDetectorInput, opts ...request.Option) (*UpdateDetectorOutput, error) {\n\treq, out := c.UpdateDetectorRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}",
"func (c *WorkLink) UpdateFleetMetadataWithContext(ctx aws.Context, input *UpdateFleetMetadataInput, opts ...request.Option) (*UpdateFleetMetadataOutput, error) {\n\treq, out := c.UpdateFleetMetadataRequest(input)\n\treq.SetContext(ctx)\n\treq.ApplyOptions(opts...)\n\treturn out, req.Send()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
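Aside: a minimal usage sketch for the WithContext pattern in the record above, assuming a constructed *BookstoreAPI client and the UpdateBookInput/SetBookName types shown in this record; the book name and timeout are illustrative placeholders only.

package main

import (
	"context"
	"fmt"
	"log"
	"time"
)

// updateWithTimeout bounds the UpdateBook call with a deadline. aws.Context is
// an alias for context.Context in the AWS SDK for Go, so a standard context
// satisfies the non-nil context requirement described above.
func updateWithTimeout(client *BookstoreAPI) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	input := (&UpdateBookInput{}).SetBookName("example-book") // setter shown in this record; placeholder name
	out, err := client.UpdateBookWithContext(ctx, input)
	if err != nil {
		log.Fatalf("UpdateBook failed or was cancelled: %v", err)
	}
	fmt.Println(out)
}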
DisplayProcessTree displays a tree of all the running Go processes. | func DisplayProcessTree(ps models.Process) {
pstree = make(map[int][]goprocess.P)
for _, p := range ps.Process {
pstree[p.PPID] = append(pstree[p.PPID], p)
}
tree := treeprint.New()
tree.SetValue("...")
seen := map[int]bool{}
for _, p := range ps.Process {
constructProcessTree(p.PPID, p, seen, tree)
}
fmt.Println(tree.String())
} | [
"func PrintProcessTree() {\n\tfmt.Println(GetProcessTree().Print())\n}",
"func GetProcessTree() gotree.Tree {\n\tlist := make(map[int][]*process)\n\tprocesses, err := GetProcessStats()\n\tutil.ErrorCheck(err)\n\n\tfor _, ps := range processes {\n\t\tif ps.IsGhostProcess() {\n\t\t\tcontinue\n\t\t}\n\t\tp := new(process)\n\t\tp.Name = ps.Name\n\t\tp.Pid = strconv.Itoa(ps.Pid)\n\t\tlist[ps.Ppid] = append(list[ps.Ppid], p)\n\t}\n\n\tppids := []int{}\n\n\tfor key := range list {\n\t\tppids = append(ppids, key)\n\t}\n\n\tsort.Ints(ppids)\n\tvar gparent = gotree.New(\"0\")\n\tfor _, ppid := range ppids {\n\t\tif ppid == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tparent := gotree.New(strconv.FormatInt(int64(ppid), 10))\n\n\t\tfor _, ps := range list[ppid] {\n\t\t\tprocessInfo := ps.Name + \"(\" + ps.Pid + \")\"\n\t\t\tparent.Add(processInfo)\n\t\t}\n\t\tgparent.AddTree(parent)\n\t}\n\treturn gparent\n}",
"func ProcTree(ssh []string, columns ...string) (*ProcNode, error) {\n\treturn pstree(concat(ssh, makePsCommand(columns)))\n}",
"func constructProcessTree(ppid int, p goprocess.P, seen map[int]bool, tree treeprint.Tree) {\n\tif seen[ppid] {\n\t\treturn\n\t}\n\tseen[ppid] = true\n\tif ppid != p.PPID {\n\t\toutput := strconv.Itoa(ppid) + \" (\" + p.Path + \" \" + p.Arguments + \" \" + fmt.Sprintf(\"user:%v, uid:%v\",\n\t\t\tp.Username, p.Uid) + \")\"\n\t\ttree = tree.AddBranch(output)\n\t} else {\n\t\toutput := strconv.Itoa(ppid) + \" (\" + p.Path + \" \" + p.Arguments + \" \" + fmt.Sprintf(\"user:%v, uid:%v\",\n\t\t\tp.Username, p.Uid) + \")\"\n\t\ttree = tree.AddBranch(output)\n\t}\n\tfor index := range pstree[ppid] {\n\t\tp := pstree[ppid][index]\n\t\tconstructProcessTree(p.PID, p, seen, tree)\n\t}\n}",
"func (t *Btree) PrintTree() {\n\tfmt.Println(\"-----------Tree-------------\")\n\tfor i := 0; i < int(t.GetIndexCursor()); i++ {\n\t\tif node, err := t.getTreeNode(int64(i)); err == nil {\n\t\t\tif node.GetIsDirt() == 0 {\n\t\t\t\tnode.printNode()\n\t\t\t\tfmt.Println(\"--------\")\n\t\t\t}\n\t\t}\n\t}\n}",
"func (t *Tree) DisplayCommands(w io.Writer) {\n\tmaxNameLen := 0\n\tfor _, c := range t.Commands {\n\t\tif len(c.Name) > maxNameLen {\n\t\t\tmaxNameLen = len(c.Name)\n\t\t}\n\t}\n\n\tfmt.Fprintf(w, \"%s commands:\\n\", t.Title)\n\tfor _, c := range t.Commands {\n\t\tif c.Brief != \"\" {\n\t\t\tfmt.Fprintf(w, \" %-*s %s\\n\", maxNameLen, c.Name, c.Brief)\n\t\t}\n\t}\n\tfmt.Fprintln(w)\n}",
"func (g Graph) Display() {\n\tfor _, node := range g {\n\t\tif len(node.Deps) <= 0 {\n\t\t\tfmt.Println(node.Name)\n\t\t} else {\n\t\t\tfor _, dep := range node.Deps {\n\t\t\t\tfmt.Printf(\"%s -> %s\\n\", node.Name, dep)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (tree *Tree) Display() string {\n\tvar out []byte\n\tdst := bytes.NewBuffer(out)\n\terr := json.Indent(dst, []byte(tree.String()), \"\", \" \")\n\tCatch(err)\n\treturn dst.String()\n}",
"func TreePrint(root Node) string {\n\ttree := tp.New()\n\n\tp(root, tree)\n\treturn tree.String()\n}",
"func runTree(cmd *cobra.Command, args []string) {\n\tfor _, arg := range args {\n\t\tif err := treeTrack(arg); err != nil {\n\t\t\tui.PrintError(err)\n\t\t}\n\t}\n}",
"func Tree(tl *stackparse.Timeline) string {\n\tvar sb strings.Builder\n\n\tsb.WriteString(fmt.Sprintf(\"%d samples over %s\\n\", tl.Samples, tl.End.Sub(tl.Start)))\n\n\tgor := []int{}\n\n\tfor k := range tl.Goroutines {\n\t\tgor = append(gor, k)\n\t}\n\n\tsort.Ints(gor)\n\n\tfor _, gid := range gor {\n\t\tg := tl.Goroutines[gid]\n\t\tsb.WriteString(fmt.Sprintf(\"goroutine %d (%s)\\n\", gid, g.Signature.CreatedByString(true)))\n\n\t\tfor i, l := range g.Layers {\n\t\t\tfor _, c := range l.Calls {\n\t\t\t\tif c.Samples > 1 {\n\t\t\t\t\tsb.WriteString(fmt.Sprintf(\" %s %s execution time: %s (%d samples)\\n\", strings.Repeat(\" \", i), c.Name, c.EndDelta-c.StartDelta, c.Samples))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tsb.WriteString(\"\\n\")\n\t}\n\n\treturn sb.String()\n}",
"func ProcessList(conf *Config, cache *osCache, logger logrus.FieldLogger) ([]*TopProcess, error) {\n\tvar fs procfs.FS\n\tvar err error\n\tif conf.ProcPath == \"\" {\n\t\tfs, err = procfs.NewDefaultFS()\n\t} else {\n\t\tfs, err = procfs.NewFS(conf.ProcPath)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprocs, err := fs.AllProcs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thostMem, _ := fs.Meminfo()\n\n\tvar out []*TopProcess\n\tfor _, p := range procs {\n\t\tstat, err := p.Stat()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tstatus, err := p.NewStatus()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tcmdLine, _ := p.CmdLine()\n\t\tif len(cmdLine) == 0 {\n\t\t\tcomm, _ := p.Comm()\n\t\t\tcmdLine = []string{comm}\n\t\t}\n\n\t\tst, _ := stat.StartTime()\n\n\t\tusername := \"\"\n\t\tuid := status.UIDs[0]\n\t\tif uid != \"\" {\n\t\t\tcachedUser := cache.uidCache[uid]\n\t\t\tif cachedUser != nil {\n\t\t\t\tusername = cachedUser.Username\n\t\t\t} else {\n\t\t\t\tuser, err := user.LookupId(uid)\n\t\t\t\tif err == nil {\n\t\t\t\t\tcache.uidCache[uid] = user\n\t\t\t\t\tusername = user.Username\n\t\t\t\t} else if logger != nil {\n\t\t\t\t\tlogger.WithError(err).Debugf(\"Could not lookup user id %s for process id %d\", uid, p.PID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tvar memPercent float64\n\t\tif hostMem.MemTotal != nil {\n\t\t\tmemPercent = 100.0 * float64(stat.RSS*cache.pageSize) / float64(*hostMem.MemTotal*1024)\n\t\t}\n\n\t\tout = append(out, &TopProcess{\n\t\t\tProcessID: p.PID,\n\t\t\tCreatedTime: time.Unix(int64(st), 0),\n\t\t\tUsername: username,\n\t\t\tPriority: stat.Priority,\n\t\t\tNice: &stat.Nice,\n\t\t\tVirtualMemoryBytes: uint64(stat.VirtualMemory()),\n\t\t\tWorkingSetSizeBytes: uint64(stat.RSS * cache.pageSize),\n\t\t\tSharedMemBytes: status.RssShmem + status.RssFile,\n\t\t\tStatus: stat.State,\n\t\t\tMemPercent: memPercent,\n\t\t\t// gopsutil scales the times to seconds already\n\t\t\tTotalCPUTime: time.Duration(stat.CPUTime() * float64(time.Second)),\n\t\t\tCommand: strings.Join(cmdLine, \" \"),\n\t\t})\n\t}\n\treturn out, nil\n}",
"func TypesTree(node Node) (str string) {\n\tstr += fmt.Sprintf(\"\\nTypes tree:\\n\")\n\tstr += typesTree(node, 0)\n\treturn str\n}",
"func CreateProcessTree(caseinfo CaseInformation, computername string, filename string) []byte {\n xmlData,_ := ioutil.ReadFile(filename)\n var rl RlProcessMemory\n var auditinfo RlAuditType\n\n rl.ParseAuditData(computername, caseinfo, auditinfo, xmlData) \n\n var root ProcessTree\n root.ParentPid = 0\n root.Pid = 0\n root.Name = \"Start\"\n\n var PpidList []int \n var PidList []int\n var ptList []ProcessTree\n\n for _,ml := range rl.ProcessList {\n \n var pt ProcessTree\n\n pt.ParentPid = ml.ParentPid\n pt.Pid = ml.Pid\n pt.Path = ml.Path\n pt.Name = ml.Name\n pt.Arguments = ml.Arguments\n pt.StartTime = ml.StartTime\n pt.Username = ml.Username\n\n ptList = append(ptList, pt)\n }\n\n ptList = QSortPpid(ptList)\n\n for i,pt := range ptList {\n PpidList = append(PpidList, ptList[i].ParentPid)\n PidList = append(PidList, ptList[i].Pid)\n \n FlagParentExists := ParentExists(ptList[i], &root)\n\n if !FlagParentExists {\n var ParentName string = fmt.Sprintf(\"%d\", pt.ParentPid)\n var node = ProcessTree {ParentPid: 0, Pid: pt.ParentPid, Name: ParentName}\n node.AddChildNode(&ptList[i])\n\n root.AddChildNode(&node)\n } \n } \n\n // Create RlRecord object compatible with ElasticSearch mapping\n var ptAuditType = RlAuditType{Generator: PTGenerator, GeneratorVersion: PTGeneratorVersion}\n var ptRecord RlRecord\n ptRecord.ComputerName = computername \n ptRecord.CaseInfo = caseinfo \n ptRecord.AuditType = ptAuditType\n ptRecord.Record = root\n\n jsonData,_ := json.Marshal(ptRecord)\n return jsonData\n }",
"func treeView(out io.Writer, objs objectDirectory, obj unstructured.Unstructured) {\n\ttbl := uitable.New()\n\ttbl.Separator = \" \"\n\ttbl.AddRow(\"NAMESPACE\", \"NAME\", \"READY\", \"REASON\", \"AGE\")\n\ttreeViewInner(\"\", tbl, objs, obj)\n\tfmt.Fprintln(color.Output, tbl)\n}",
"func (journal TaskJournal) Tree() string {\n\tif len(journal.TaskList) == 0 {\n\t\treturn fmt.Sprintf(\"\\n%s\\n\\n\", notasks)\n\t}\n\ts := \"\"\n\ttree := TreeString(journal.TaskList)\n\tif tree[0] != '\\n' {\n\t\t// We add a line return for a pretty look\n\t\ts += fmt.Sprintln()\n\t}\n\ts += tree\n\ts += fmt.Sprintf(\"\\nLegend: %s %s %s\\n\", StatusTodo.legend(), StatusDoing.legend(), StatusDone.legend())\n\treturn s\n}",
"func KillTree(pid int32) error {\n\tproc, err := process.NewProcess(pid)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"NewProcess failed\")\n\t}\n\n\tchildren, err := proc.Children()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Children failed\")\n\t}\n\n\tfor _, proc := range children {\n\t\terr = KillTree(proc.Pid)\n\t\tif err != nil {\n\t\t\tlogs.Warnf(\"Failed to kill child tree: %+v\\n\", err)\n\t\t}\n\t}\n\n\treturn proc.Kill()\n}",
"func (build *Build) Tree() {\n\tfmt.Println(name(build.File))\n\tfor index, parent := range build.Parents {\n\t\tparent.SubTree(\"\", index < len(build.Parents)-1)\n\t}\n}",
"func (tree *Tree) Print() string {\n\tvar b strings.Builder\n\tfor _, entry := range tree.Entries {\n\t\t// Prepend 0 in front of mode to make it 6 char long.\n\t\tentryMode := strings.Repeat(\"0\", 6-len(entry.mode)) + entry.mode\n\n\t\tfmt.Fprintf(&b, \"%s %s %s\\t%s\\n\",\n\t\t\tentryMode, entry.objType, entry.hash, entry.name)\n\t}\n\n\treturn b.String()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
constructProcessTree constructs the process tree in a depth-first fashion. | func constructProcessTree(ppid int, p goprocess.P, seen map[int]bool, tree treeprint.Tree) {
if seen[ppid] {
return
}
seen[ppid] = true
if ppid != p.PPID {
output := strconv.Itoa(ppid) + " (" + p.Path + " " + p.Arguments + " " + fmt.Sprintf("user:%v, uid:%v",
p.Username, p.Uid) + ")"
tree = tree.AddBranch(output)
} else {
output := strconv.Itoa(ppid) + " (" + p.Path + " " + p.Arguments + " " + fmt.Sprintf("user:%v, uid:%v",
p.Username, p.Uid) + ")"
tree = tree.AddBranch(output)
}
for index := range pstree[ppid] {
p := pstree[ppid][index]
constructProcessTree(p.PID, p, seen, tree)
}
} | [
"func CreateProcessTree(caseinfo CaseInformation, computername string, filename string) []byte {\n xmlData,_ := ioutil.ReadFile(filename)\n var rl RlProcessMemory\n var auditinfo RlAuditType\n\n rl.ParseAuditData(computername, caseinfo, auditinfo, xmlData) \n\n var root ProcessTree\n root.ParentPid = 0\n root.Pid = 0\n root.Name = \"Start\"\n\n var PpidList []int \n var PidList []int\n var ptList []ProcessTree\n\n for _,ml := range rl.ProcessList {\n \n var pt ProcessTree\n\n pt.ParentPid = ml.ParentPid\n pt.Pid = ml.Pid\n pt.Path = ml.Path\n pt.Name = ml.Name\n pt.Arguments = ml.Arguments\n pt.StartTime = ml.StartTime\n pt.Username = ml.Username\n\n ptList = append(ptList, pt)\n }\n\n ptList = QSortPpid(ptList)\n\n for i,pt := range ptList {\n PpidList = append(PpidList, ptList[i].ParentPid)\n PidList = append(PidList, ptList[i].Pid)\n \n FlagParentExists := ParentExists(ptList[i], &root)\n\n if !FlagParentExists {\n var ParentName string = fmt.Sprintf(\"%d\", pt.ParentPid)\n var node = ProcessTree {ParentPid: 0, Pid: pt.ParentPid, Name: ParentName}\n node.AddChildNode(&ptList[i])\n\n root.AddChildNode(&node)\n } \n } \n\n // Create RlRecord object compatible with ElasticSearch mapping\n var ptAuditType = RlAuditType{Generator: PTGenerator, GeneratorVersion: PTGeneratorVersion}\n var ptRecord RlRecord\n ptRecord.ComputerName = computername \n ptRecord.CaseInfo = caseinfo \n ptRecord.AuditType = ptAuditType\n ptRecord.Record = root\n\n jsonData,_ := json.Marshal(ptRecord)\n return jsonData\n }",
"func GetProcessTree() gotree.Tree {\n\tlist := make(map[int][]*process)\n\tprocesses, err := GetProcessStats()\n\tutil.ErrorCheck(err)\n\n\tfor _, ps := range processes {\n\t\tif ps.IsGhostProcess() {\n\t\t\tcontinue\n\t\t}\n\t\tp := new(process)\n\t\tp.Name = ps.Name\n\t\tp.Pid = strconv.Itoa(ps.Pid)\n\t\tlist[ps.Ppid] = append(list[ps.Ppid], p)\n\t}\n\n\tppids := []int{}\n\n\tfor key := range list {\n\t\tppids = append(ppids, key)\n\t}\n\n\tsort.Ints(ppids)\n\tvar gparent = gotree.New(\"0\")\n\tfor _, ppid := range ppids {\n\t\tif ppid == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tparent := gotree.New(strconv.FormatInt(int64(ppid), 10))\n\n\t\tfor _, ps := range list[ppid] {\n\t\t\tprocessInfo := ps.Name + \"(\" + ps.Pid + \")\"\n\t\t\tparent.Add(processInfo)\n\t\t}\n\t\tgparent.AddTree(parent)\n\t}\n\treturn gparent\n}",
"func DisplayProcessTree(ps models.Process) {\n\tpstree = make(map[int][]goprocess.P)\n\tfor _, p := range ps.Process {\n\t\tpstree[p.PPID] = append(pstree[p.PPID], p)\n\t}\n\ttree := treeprint.New()\n\ttree.SetValue(\"...\")\n\tseen := map[int]bool{}\n\tfor _, p := range ps.Process {\n\t\tconstructProcessTree(p.PPID, p, seen, tree)\n\t}\n\tfmt.Println(tree.String())\n}",
"func ProcTree(ssh []string, columns ...string) (*ProcNode, error) {\n\treturn pstree(concat(ssh, makePsCommand(columns)))\n}",
"func PrintProcessTree() {\n\tfmt.Println(GetProcessTree().Print())\n}",
"func buildTree(preorder []int, inorder []int) *TreeNode {\n\treturn build(preorder, 0, len(preorder), inorder, 0, len(inorder))\n}",
"func newConsensusProcess(\n\tctx context.Context,\n\tcfg config.Config,\n\tlayer types.LayerID,\n\ts *Set,\n\toracle Rolacle,\n\tstateQuerier stateQuerier,\n\tsigning *signing.EdSigner,\n\tedVerifier *signing.EdVerifier,\n\tet *EligibilityTracker,\n\tnid types.NodeID,\n\tp2p pubsub.Publisher,\n\tcomm communication,\n\tev roleValidator,\n\tclock RoundClock,\n\tlogger log.Log,\n) *consensusProcess {\n\tproc := &consensusProcess{\n\t\tState: State{\n\t\t\tround: preRound,\n\t\t\tcommittedRound: preRound,\n\t\t\tvalue: s.Clone(),\n\t\t},\n\t\tlayer: layer,\n\t\toracle: oracle,\n\t\tsigner: signing,\n\t\tnid: nid,\n\t\tpublisher: p2p,\n\t\tcfg: cfg,\n\t\tcomm: comm,\n\t\tpending: make(map[types.NodeID]*Message, cfg.N),\n\t\tLog: logger,\n\t\tmTracker: newMsgsTracker(),\n\t\teTracker: et,\n\t\tclock: clock,\n\t}\n\tproc.ctx, proc.cancel = context.WithCancel(ctx)\n\tproc.preRoundTracker = newPreRoundTracker(logger.WithContext(proc.ctx).WithFields(proc.layer), comm.mchOut, proc.eTracker, cfg.N/2+1, cfg.N)\n\tproc.validator = newSyntaxContextValidator(signing, edVerifier, cfg.N/2+1, proc.statusValidator(), stateQuerier, ev, proc.mTracker, proc.eTracker, logger)\n\n\treturn proc\n}",
"func createTree(lr string, d int, parent int) *TreeNode {\n\tvar a string\n\n\tif lr == \"root\" {\n\t\tfmt.Printf(\"Please input the deep: %d %s node of root:\\r\\n\", d, lr)\n\t} else {\n\t\tfmt.Printf(\"Please input the deep: %d %s node of node: %d:\\r\\n\", d, lr, parent)\n\t}\n\n\tfmt.Scanln(&a)\n\t//Stop to creating the branch by typing '#'\n\tif a == \"#\" {\n\t\tfmt.Printf(\"Stop traversal on deep: %d\\r\\n\", d)\n\t\treturn nil\n\t}\n\ttree := new(TreeNode)\n\ttree.Val, _ = strconv.Atoi(a)\n\tfmt.Printf(\"Start create deep: %d node: %d.\\r\\n\", d, tree.Val)\n\tif lr != \"root\" {\n\t\tfmt.Printf(\"**Current parent node: %d\\r\\n\", parent)\n\t}\n\ttree.Left = createTree(\"left\", d+1, tree.Val)\n\tfmt.Printf(\"Back to deep: %d node: %d.\\r\\n\", d, tree.Val)\n\ttree.Right = createTree(\"right\", d+1, tree.Val)\n\tfmt.Printf(\"Back to deep: %d node: %d.\\r\\n\", d, tree.Val)\n\n\tfmt.Printf(\"Create deep: %d node: %d successed.\\r\\n\", d, tree.Val)\n\treturn tree\n}",
"func (t *Tree) Build(\n\tctx context.Context, sm *SortedMap, txi TxInfo) (err error) {\n\tt.Lock()\n\tdefer t.Unlock()\n\n\tvar prevRoot, nextRoot Hash\n\tif prevRoot, err = t.eng.LookupRoot(ctx); err != nil {\n\t\treturn err\n\t}\n\tif nextRoot, err = t.hashTreeRecursive(ctx,\n\t\tLevel(0), sm, prevRoot); err != nil {\n\t\treturn err\n\t}\n\n\tif err = t.eng.CommitRoot(ctx, prevRoot, nextRoot, txi); err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}",
"func newProcess(conf *config.CHYLE) *process {\n\treturn &process{\n\t\tmatchers.Create(conf.FEATURES.MATCHERS, conf.MATCHERS),\n\t\textractors.Create(conf.FEATURES.EXTRACTORS, conf.EXTRACTORS),\n\t\tdecorators.Create(conf.FEATURES.DECORATORS, conf.DECORATORS),\n\t\tsenders.Create(conf.FEATURES.SENDERS, conf.SENDERS),\n\t}\n}",
"func makeTree() (*Tree, map[uint64]NodeComponent) {\n\t// tree\n\t// ----\n\t// 5 <-- level 0\n\t// / \\\n\t// 3 8 <-- level 1\n\t// / \\ / \\\n\t// 1 4 6 9 <-- level 2\n\t// / \\\n\t// 2 <-- level 3\n\n\tnodes := make(map[uint64]NodeComponent)\n\n\tone := NewNode(1)\n\tnodes[1] = one\n\ttwo := NewNode(2)\n\tnodes[2] = two\n\tthree := NewNode(3)\n\tnodes[3] = three\n\tfour := NewNode(4)\n\tnodes[4] = four\n\tfive := NewNode(5)\n\tnodes[5] = five\n\tsix := NewNode(6)\n\tnodes[6] = six\n\teight := NewNode(8)\n\tnodes[8] = eight\n\tnine := NewNode(9)\n\tnodes[9] = nine\n\n\ttree, err := NewTree(\n\t\tfive,\n\t\tthree, one, four,\n\t\ttwo,\n\t\teight,\n\t\tsix, nine,\n\t)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn tree, nodes\n}",
"func ReConstructBiTree(preorder, inorder []int) *datastructure.BST {\n\tif preorder == nil || inorder == nil || len(preorder) < 1 || len(inorder) < 1 {\n\t\treturn nil\n\t}\n\n\troot := construct(preorder, inorder)\n\ttree := datastructure.BST{}\n\ttree.Root = root\n\treturn &tree\n}",
"func newProcess(c *Container, spec *oci.Process, process runtime.Process, pid uint32, init bool) *Process {\n\tp := &Process{\n\t\tc: c,\n\t\tspec: spec,\n\t\tprocess: process,\n\t\tinit: init,\n\t\tcid: c.id,\n\t\tpid: pid,\n\t}\n\tp.exitWg.Add(1)\n\tp.writersWg.Add(1)\n\tgo func() {\n\t\tctx, span := trace.StartSpan(context.Background(), \"newProcess::waitBackground\")\n\t\tdefer span.End()\n\t\tspan.AddAttributes(\n\t\t\ttrace.StringAttribute(\"cid\", p.cid),\n\t\t\ttrace.Int64Attribute(\"pid\", int64(p.pid)))\n\n\t\t// Wait for the process to exit\n\t\texitCode, err := p.process.Wait()\n\t\tif err != nil {\n\t\t\tlog.G(ctx).WithError(err).Error(\"failed to wait for runc process\")\n\t\t}\n\t\tp.exitCode = exitCode\n\t\tlog.G(ctx).WithField(\"exitCode\", p.exitCode).Debug(\"process exited\")\n\n\t\t// Free any process waiters\n\t\tp.exitWg.Done()\n\t\t// Decrement any container process count waiters\n\t\tc.processesMutex.Lock()\n\t\tc.processesWg.Done()\n\t\tc.processesMutex.Unlock()\n\n\t\t// Schedule the removal of this process object from the map once at\n\t\t// least one waiter has read the result\n\t\tgo func() {\n\t\t\tp.writersWg.Wait()\n\t\t\tc.processesMutex.Lock()\n\n\t\t\t_, span := trace.StartSpan(context.Background(), \"newProcess::waitBackground::waitAllWaiters\")\n\t\t\tdefer span.End()\n\t\t\tspan.AddAttributes(\n\t\t\t\ttrace.StringAttribute(\"cid\", p.cid),\n\t\t\t\ttrace.Int64Attribute(\"pid\", int64(p.pid)))\n\n\t\t\tdelete(c.processes, p.pid)\n\t\t\tc.processesMutex.Unlock()\n\t\t}()\n\t}()\n\treturn p\n}",
"func buildTree(point grid.Point, rest grid.Path, pathfinderFn PathFinderFunc) *Node {\n\tnode := newNode(point, rest)\n\n\tif len(rest.Points) == 0 {\n\t\treturn node\n\t}\n\n\tc := pathfinderFn(point, rest)\n\tfor _, p := range c.Points {\n\t\tnewPath := grid.Remove(rest, p)\n\t\tnode.SubNodes = append(node.SubNodes, buildTree(p, newPath, pathfinderFn))\n\t}\n\n\treturn node\n}",
"func ConstructTreeBasedDistExec(sctx sessionctx.Context, p plannercore.PhysicalPlan) ([]*tipb.Executor, error) {\n\texecPB, err := p.ToPB(sctx, kv.TiFlash)\n\treturn []*tipb.Executor{execPB}, err\n}",
"func NewPreorderTree(nums ...int) *TreeNode {\n\treturn (&preorderTree{nums}).constructTree()\n}",
"func BSTfromPreOrder(array []interface{}) *Node {\n\tvar maxx int\n\tmaxx = 999999999\n\troot := &Node{\n\t\tData: array[0],\n\t}\n\ttreeBuilderHelper(array, root, 1, 0, maxx)\n\treturn root\n}",
"func TestKillProcessTree(t *testing.T) {\n\ttestCases := []struct {\n\t\tparentProc int\n\t\tprocesses []Process\n\t\tkillList []int\n\t\tlistFailure bool\n\t\tkillFailure bool\n\t\tfailure bool\n\t}{\n\t\t{\n\t\t\t// One process only.\n\t\t\tparentProc: 10,\n\t\t\tprocesses: []Process{\n\t\t\t\t{10, 1, \"process\"},\n\t\t\t},\n\t\t\tkillList: []int{10},\n\t\t\tlistFailure: false,\n\t\t\tkillFailure: false,\n\t\t\tfailure: false,\n\t\t},\n\t\t{\n\t\t\t// One process with two children and one unrelated process.\n\t\t\tparentProc: 10,\n\t\t\tprocesses: []Process{\n\t\t\t\t{10, 1, \"parent\"},\n\t\t\t\t{20, 10, \"child1\"},\n\t\t\t\t{30, 10, \"child2\"},\n\t\t\t\t{40, 1, \"otherprocess\"},\n\t\t\t},\n\t\t\tkillList: []int{10, 20, 30},\n\t\t\tlistFailure: false,\n\t\t\tkillFailure: false,\n\t\t\tfailure: false,\n\t\t},\n\t\t{\n\t\t\t// One process with three unrelated processes.\n\t\t\tparentProc: 10,\n\t\t\tprocesses: []Process{\n\t\t\t\t{10, 1, \"parent\"},\n\t\t\t\t{20, 15, \"otherprocess\"},\n\t\t\t\t{30, 15, \"otherprocess\"},\n\t\t\t\t{40, 1, \"otherprocess\"},\n\t\t\t},\n\t\t\tkillList: []int{10},\n\t\t\tlistFailure: false,\n\t\t\tkillFailure: false,\n\t\t\tfailure: false,\n\t\t},\n\t\t{\n\t\t\t// No such process.\n\t\t\tparentProc: 5,\n\t\t\tprocesses: []Process{\n\t\t\t\t{10, 1, \"parent\"},\n\t\t\t\t{20, 15, \"child1\"},\n\t\t\t\t{30, 15, \"child2\"},\n\t\t\t\t{40, 1, \"otherprocess\"},\n\t\t\t},\n\t\t\tkillList: []int{},\n\t\t\tlistFailure: false,\n\t\t\tkillFailure: false,\n\t\t\tfailure: true,\n\t\t},\n\t\t{\n\t\t\t// 4 levels of processes and two unrelated processes.\n\t\t\tparentProc: 10,\n\t\t\tprocesses: []Process{\n\t\t\t\t{5, 1, \"otherprocess\"},\n\t\t\t\t{10, 1, \"parent\"},\n\t\t\t\t{20, 10, \"child\"},\n\t\t\t\t{25, 5, \"anotherprocess\"},\n\t\t\t\t{30, 20, \"grandchild\"},\n\t\t\t\t{40, 30, \"greatgrandchild\"},\n\t\t\t},\n\t\t\tkillList: []int{10, 20, 30, 40},\n\t\t\tlistFailure: false,\n\t\t\tkillFailure: false,\n\t\t\tfailure: false,\n\t\t},\n\t\t{\n\t\t\t// ListProcesses() failure.\n\t\t\tparentProc: 10,\n\t\t\tprocesses: []Process{},\n\t\t\tkillList: []int{},\n\t\t\tlistFailure: true,\n\t\t\tkillFailure: false,\n\t\t\tfailure: true,\n\t\t},\n\t\t{\n\t\t\t// KillProcess() failure.\n\t\t\tparentProc: 10,\n\t\t\tprocesses: []Process{\n\t\t\t\t{10, 1, \"parent\"},\n\t\t\t\t{20, 10, \"child1\"},\n\t\t\t\t{30, 10, \"child2\"},\n\t\t\t\t{40, 1, \"otherprocess\"},\n\t\t\t},\n\t\t\tkillList: []int{},\n\t\t\tlistFailure: false,\n\t\t\tkillFailure: true,\n\t\t\tfailure: true,\n\t\t},\n\t}\n\tfor i, tc := range testCases {\n\t\tmph := MockProcessHandler{\n\t\t\tProcesses: append([]Process{}, tc.processes...),\n\t\t\tListFailure: tc.listFailure,\n\t\t\tKillFailure: tc.killFailure,\n\t\t}\n\t\tptk := NewProcessTreeKiller(&mph)\n\t\terr := ptk.KillProcessTree(tc.parentProc)\n\t\tmsg := fmt.Sprintf(\"test case #%d %+v failed; state: %+v\", i, tc, mph)\n\t\tif !tc.failure {\n\t\t\tassert.NoError(t, err, msg)\n\t\t} else {\n\t\t\tassert.Error(t, err, msg)\n\t\t}\n\t\tassert.ElementsMatch(t, tc.killList, mph.KillList, msg)\n\t}\n}",
"func buildTree(preorder []int, inorder []int) *TreeNode {\n\n\tif preorder == nil || len(preorder) == 0 {\n\t\treturn nil\n\t}\n\tif len(preorder) == 1 {\n\t\treturn &TreeNode{Val: preorder[0]}\n\t}\n\n\troot := preorder[0]\n\trootNode := &TreeNode{Val: root}\n\trootIndex := 0\n\tfor i := 0; i < len(inorder); i++ {\n\t\tif inorder[i] == root {\n\t\t\trootIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tleftInorder := inorder[:rootIndex]\n\tleftPreorder := preorder[1 : len(leftInorder)+1]\n\trightInorder := inorder[rootIndex+1:]\n\trightPreorder := preorder[1+len(leftInorder):]\n\n\trootNode.Left = buildTree(leftPreorder, leftInorder)\n\trootNode.Right = buildTree(rightPreorder, rightInorder)\n\n\treturn rootNode\n\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetBalance gets the ether balance for a specified address | func (e *Etherscan) GetBalance(address string) (*apd.Decimal, error) {
url := fmt.Sprintf("https://%s/api?module=account&action=balance&address=%s&tag=latest&apikey=%s",
e.Domain, address, e.APIKey,
)
res, err := req.Get(url)
if err != nil {
return nil, err
}
var result map[string]interface{}
res.ToJSON(&result)
if result["status"] != "1" {
return nil, errors.New(result["result"].(string))
}
b, _, _ := apd.NewFromString(result["result"].(string))
return b, nil
} | [
"func (c *Client) GetAddressBalance(address string) (balance Balance, err error) {\n\tr, err := c.request(\"getaddressbalance\", []interface{}{address})\n\tif err = c.error(err, &r); err != nil {\n\t\treturn\n\t}\n\n\tvar b map[string]interface{}\n\terr = json.Unmarshal(r.Result, &b)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Cast strings to float64\n\tif b[\"unconfirmed\"] != nil {\n\t\tif v, err := strconv.ParseFloat(b[\"unconfirmed\"].(string), 64); err == nil {\n\t\t\tbalance.Unconfirmed = v\n\t\t}\n\t}\n\n\tif b[\"confirmed\"] != nil {\n\t\tif v, err := strconv.ParseFloat(b[\"confirmed\"].(string), 64); err == nil {\n\t\t\tbalance.Confirmed = v\n\t\t}\n\t}\n\n\treturn\n}",
"func (e *Ethereum) Balance(address, token string) (ethBal, tokBal *big.Int, err error) {\n\treturn e.c.GetBalance(address, token)\n}",
"func (c *Client) GetEtherBalance(address string) (*big.Int, error) {\n\tif !addressCheck(address) {\n\t\treturn nil, errors.New(\"Invalid Address\")\n\t}\n\taccount := common.HexToAddress(address)\n\treturn c.Ethclient.BalanceAt(context.Background(), account, nil)\n}",
"func (t *GoTezos) Balance(blockhash, address string) (*big.Int, error) {\n\tquery := fmt.Sprintf(\"/chains/main/blocks/%s/context/contracts/%s/balance\", blockhash, address)\n\tresp, err := t.get(query)\n\tif err != nil {\n\t\treturn big.NewInt(0), errors.Wrap(err, \"failed to get balance\")\n\t}\n\n\tbalance, err := newInt(resp)\n\tif err != nil {\n\t\treturn big.NewInt(0), errors.Wrap(err, \"failed to unmarshal balance\")\n\t}\n\n\treturn balance.Big, nil\n}",
"func (s *DB) GetBalance(addr common.Address) uint64 {\n\tstateObject := s.getStateObject(addr)\n\tif stateObject != nil {\n\t\treturn stateObject.Balance()\n\t}\n\treturn 0\n}",
"func (e *Etherscan) GetTokenBalance(address string) (*apd.Decimal, error) {\n\turl := fmt.Sprintf(\"https://%s/api?module=account&action=tokenbalance&contractaddress=%s&address=%s&tag=latest&apikey=%s\",\n\t\te.Domain, e.ContractAddress, address, e.APIKey,\n\t)\n\tres, err := req.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result map[string]interface{}\n\tres.ToJSON(&result)\n\tif result[\"status\"] != \"1\" {\n\t\treturn nil, errors.New(result[\"result\"].(string))\n\t}\n\n\tb, _, _ := apd.NewFromString(result[\"result\"].(string))\n\treturn b, nil\n}",
"func (c *Client) Balance(ctx context.Context, a swarm.Address) (resp Balance, err error) {\n\tb, err := c.debug.Node.Balance(ctx, a)\n\tif err != nil {\n\t\treturn Balance{}, fmt.Errorf(\"get balance with node %s: %w\", a.String(), err)\n\t}\n\n\treturn Balance{\n\t\tBalance: b.Balance.Int64(),\n\t\tPeer: b.Peer,\n\t}, nil\n}",
"func (c *Client) Balance(addr string) (*big.Int, error) {\n\treturn c.balance(context.Background(), addr)\n}",
"func (_LuxUni_EET *LuxUni_EETCaller) GetBalance(opts *bind.CallOpts, _addr common.Address, isReceiver bool, isTotalScan bool) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _LuxUni_EET.contract.Call(opts, out, \"getBalance\", _addr, isReceiver, isTotalScan)\n\treturn *ret0, err\n}",
"func (_GlobalInbox *GlobalInboxCaller) GetEthBalance(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _GlobalInbox.contract.Call(opts, &out, \"getEthBalance\", _owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (_GlobalEthWallet *GlobalEthWalletCaller) GetEthBalance(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _GlobalEthWallet.contract.Call(opts, &out, \"getEthBalance\", _owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (b *Bittrex) GetBalance(currency string) (res string) { //balance helper.Balance, err error) {\r\n\tr, err := b.client.do(\"GET\", \"account/getbalance?currency=\"+strings.ToUpper(currency), \"\", true)\r\n\tres = helper.BalanceResponseHandler(r, err, \"GetBalance\")\r\n\treturn\r\n}",
"func (client *CallerSubscriberClient) GetERC20Balance(address common.Address, contractAddress common.Address) (*big.Int, error) {\n\tresult := \"\"\n\tnumLinkBigInt := new(big.Int)\n\tfunctionSelector := models.HexToFunctionSelector(\"0x70a08231\") // balanceOf(address)\n\tdata := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(address.Bytes(), utils.EVMWordByteLen))\n\targs := CallArgs{\n\t\tTo: contractAddress,\n\t\tData: data,\n\t}\n\terr := client.logCall(&result, \"eth_call\", args, \"latest\")\n\tif err != nil {\n\t\treturn numLinkBigInt, err\n\t}\n\tnumLinkBigInt.SetString(result, 0)\n\treturn numLinkBigInt, nil\n}",
"func (ac *AddressCache) Balance(addr string) (*dbtypes.AddressBalance, *BlockID) {\n\taci := ac.addressCacheItem(addr)\n\tif aci == nil {\n\t\treturn nil, nil\n\t}\n\treturn aci.Balance()\n}",
"func (a *TestnetInsightApiService) TestnetGetAddressBalance(ctx context.Context, address string) (float32, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue float32\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/testnet/ins/addr/{address}/balance\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"address\"+\"}\", fmt.Sprintf(\"%v\", address), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v float32\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func GetBalance(w http.ResponseWriter, r *http.Request) {\n\tparametros := mux.Vars(r)\n\taccountID, err := strconv.ParseUint(parametros[\"account_id\"], 10, 64)\n\tif err != nil {\n\t\tresposta.Erro(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\taccountIDToken, err := autenticacao.ExtrairAccountID(r)\n\tif err != nil {\n\t\tresposta.Erro(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\tif accountID != accountIDToken {\n\t\tresposta.Erro(w, http.StatusForbidden, errors.New(\"não é permitido obter o saldo de uma que não seja sua\"))\n\t\treturn\n\t}\n\n\tdb, err := db.ConectDB()\n\tif err != nil {\n\t\tresposta.Erro(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tdefer db.Close()\n\n\trepository := repository.AccountsRepository(db)\n\n\tbalance, err := repository.GetAccountBalance(accountID)\n\tif err != nil {\n\t\tresposta.Erro(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresposta.JSON(w, http.StatusAccepted, balance)\n}",
"func (_DhToken *DhTokenCaller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DhToken.contract.Call(opts, out, \"balanceOf\", account)\n\treturn *ret0, err\n}",
"func (_Dai *DaiCaller) BalanceOf(opts *bind.CallOpts, src common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Dai.contract.Call(opts, out, \"balanceOf\", src)\n\treturn *ret0, err\n}",
"func (t *RpcServ) GetFrozenBalance(gctx context.Context, req *pb.AddressStatus) (*pb.AddressStatus, error) {\n\t// 默认响应\n\tresp := &pb.AddressStatus{\n\t\tBcs: make([]*pb.TokenDetail, 0),\n\t}\n\t// 获取请求上下文,对内传递rctx\n\trctx := sctx.ValueReqCtx(gctx)\n\n\tif req == nil || req.GetAddress() == \"\" {\n\t\trctx.GetLog().Warn(\"param error,some param unset\")\n\t\treturn resp, ecom.ErrParameter\n\t}\n\n\tfor i := 0; i < len(req.Bcs); i++ {\n\t\ttmpTokenDetail := &pb.TokenDetail{\n\t\t\tBcname: req.Bcs[i].Bcname,\n\t\t}\n\t\thandle, err := models.NewChainHandle(req.Bcs[i].Bcname, rctx)\n\t\tif err != nil {\n\t\t\ttmpTokenDetail.Error = pb.XChainErrorEnum_BLOCKCHAIN_NOTEXIST\n\t\t\ttmpTokenDetail.Balance = \"\"\n\t\t\tresp.Bcs = append(resp.Bcs, tmpTokenDetail)\n\t\t\tcontinue\n\t\t}\n\t\tbalance, err := handle.GetFrozenBalance(req.Address)\n\t\tif err != nil {\n\t\t\ttmpTokenDetail.Error = pb.XChainErrorEnum_UNKNOW_ERROR\n\t\t\ttmpTokenDetail.Balance = \"\"\n\t\t} else {\n\t\t\ttmpTokenDetail.Error = pb.XChainErrorEnum_SUCCESS\n\t\t\ttmpTokenDetail.Balance = balance\n\t\t}\n\t\tresp.Bcs = append(resp.Bcs, tmpTokenDetail)\n\t}\n\tresp.Address = req.GetAddress()\n\n\trctx.GetLog().SetInfoField(\"account\", req.GetAddress())\n\treturn resp, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
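Aside: a minimal usage sketch for the GetBalance helper above, assuming an Etherscan value with the Domain and APIKey fields the method reads; the API key and address below are placeholders. Etherscan reports account balances as a raw integer string in wei, which GetBalance parses into an arbitrary-precision apd.Decimal.

package main

import (
	"fmt"
	"log"
)

func main() {
	// Field values are illustrative; api.etherscan.io is the mainnet API host.
	e := &Etherscan{
		Domain: "api.etherscan.io",
		APIKey: "YOUR_API_KEY",
	}

	// The returned decimal holds the balance in wei.
	wei, err := e.GetBalance("0x0000000000000000000000000000000000000000")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("balance (wei): %s\n", wei.String())
}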
GetTokenBalance gets the token balance for a specified address | func (e *Etherscan) GetTokenBalance(address string) (*apd.Decimal, error) {
url := fmt.Sprintf("https://%s/api?module=account&action=tokenbalance&contractaddress=%s&address=%s&tag=latest&apikey=%s",
e.Domain, e.ContractAddress, address, e.APIKey,
)
res, err := req.Get(url)
if err != nil {
return nil, err
}
var result map[string]interface{}
res.ToJSON(&result)
if result["status"] != "1" {
return nil, errors.New(result["result"].(string))
}
b, _, _ := apd.NewFromString(result["result"].(string))
return b, nil
} | [
"func (e *Ethereum) Balance(address, token string) (ethBal, tokBal *big.Int, err error) {\n\treturn e.c.GetBalance(address, token)\n}",
"func tcTokenBalance(eng *vm.Engine, index int64, args []uint64) (uint64, error) {\n\trunningFrame, _ := eng.RunningAppFrame()\n\tvmem := runningFrame.VM.VMemory()\n\taddrTmp, err := vmem.GetString(args[0])\n\tif err != nil || !common.IsHexAddress(string(addrTmp)) {\n\t\treturn 0, vm.ErrInvalidApiArgs\n\t}\n\taddr := common.HexToAddress(string(addrTmp))\n\ttokenTmp, err := vmem.GetString(args[1])\n\tif err != nil || !common.IsHexAddress(string(tokenTmp)) {\n\t\treturn 0, vm.ErrInvalidApiArgs\n\t}\n\ttoken := common.HexToAddress(string(tokenTmp))\n\n\tmState := eng.State.(types.StateDB)\n\tvar balance *big.Int\n\tif token == common.EmptyAddress {\n\t\tbalance = mState.GetBalance(addr)\n\t} else {\n\t\tbalance = mState.GetTokenBalance(addr, token)\n\t}\n\treturn vmem.SetBytes([]byte(balance.String()))\n}",
"func (e *Etherscan) GetBalance(address string) (*apd.Decimal, error) {\n\turl := fmt.Sprintf(\"https://%s/api?module=account&action=balance&address=%s&tag=latest&apikey=%s\",\n\t\te.Domain, address, e.APIKey,\n\t)\n\tres, err := req.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result map[string]interface{}\n\tres.ToJSON(&result)\n\tif result[\"status\"] != \"1\" {\n\t\treturn nil, errors.New(result[\"result\"].(string))\n\t}\n\n\tb, _, _ := apd.NewFromString(result[\"result\"].(string))\n\treturn b, nil\n}",
"func (_DhToken *DhTokenCaller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DhToken.contract.Call(opts, out, \"balanceOf\", account)\n\treturn *ret0, err\n}",
"func (p *proxy) FMintTokenBalance(owner *common.Address, token *common.Address, tp types.DefiTokenType) (hexutil.Big, error) {\n\treturn p.rpc.FMintTokenBalance(owner, token, tp)\n}",
"func (_Radex *RadexSession) BalanceOf(token common.Address, user common.Address) (*big.Int, error) {\n\treturn _Radex.Contract.BalanceOf(&_Radex.CallOpts, token, user)\n}",
"func (s *AccountService) GetTokenBalanceProvidor(owner common.Address, tokenAddress common.Address) (*types.TokenBalance, error) {\n\ttoken, err := s.TokenDao.GetByAddress(tokenAddress)\n\tif err != nil || token == nil {\n\t\treturn nil, err\n\t}\n\ttokenBalance := &types.TokenBalance{\n\t\tAddress: tokenAddress,\n\t\tSymbol: token.Symbol,\n\t\tDecimals: token.Decimals,\n\t\tAvailableBalance: big.NewInt(0),\n\t\tInOrderBalance: big.NewInt(0),\n\t\tBalance: big.NewInt(0),\n\t\tInUsdBalance: big.NewFloat(0),\n\t}\n\tb, err := s.Provider.Balance(owner, tokenAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenBalance.Balance = b\n\n\tlistPairs, err := s.PairDao.GetActivePairs()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\ttokens, err := s.TokenDao.GetAll()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\tsellTokenExchangeLockedBalance, err := s.OrderDao.GetUserLockedBalance(owner, tokenAddress, listPairs)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\tsellTokenLendingLockedBalance, err := s.LendingDao.GetUserLockedBalance(owner, tokenAddress, tokens)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\tsellTokenLockedBalance := new(big.Int).Add(sellTokenExchangeLockedBalance, sellTokenLendingLockedBalance)\n\ttokenBalance.InOrderBalance = sellTokenLockedBalance\n\ttokenBalance.AvailableBalance = math.Sub(b, sellTokenLockedBalance)\n\n\tprice, _ := s.OHLCVService.GetLastPriceCurrentByTime(tokenBalance.Symbol, time.Now())\n\n\tif tokenBalance != nil && price != nil {\n\t\tinUsdBalance := new(big.Float).Mul(price, new(big.Float).SetInt(tokenBalance.Balance))\n\t\tinUsdBalance = new(big.Float).Quo(inUsdBalance, new(big.Float).SetInt(big.NewInt(int64(math2.Pow10(tokenBalance.Decimals)))))\n\t\ttokenBalance.InUsdBalance = inUsdBalance\n\t}\n\n\treturn tokenBalance, nil\n}",
"func (_Radex *RadexCaller) BalanceOf(opts *bind.CallOpts, token common.Address, user common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Radex.contract.Call(opts, out, \"balanceOf\", token, user)\n\treturn *ret0, err\n}",
"func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, owner common.Address, id *big.Int) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"balanceOf\", owner, id)\n\treturn *ret0, err\n}",
"func (t *RpcServ) GetFrozenBalance(gctx context.Context, req *pb.AddressStatus) (*pb.AddressStatus, error) {\n\t// 默认响应\n\tresp := &pb.AddressStatus{\n\t\tBcs: make([]*pb.TokenDetail, 0),\n\t}\n\t// 获取请求上下文,对内传递rctx\n\trctx := sctx.ValueReqCtx(gctx)\n\n\tif req == nil || req.GetAddress() == \"\" {\n\t\trctx.GetLog().Warn(\"param error,some param unset\")\n\t\treturn resp, ecom.ErrParameter\n\t}\n\n\tfor i := 0; i < len(req.Bcs); i++ {\n\t\ttmpTokenDetail := &pb.TokenDetail{\n\t\t\tBcname: req.Bcs[i].Bcname,\n\t\t}\n\t\thandle, err := models.NewChainHandle(req.Bcs[i].Bcname, rctx)\n\t\tif err != nil {\n\t\t\ttmpTokenDetail.Error = pb.XChainErrorEnum_BLOCKCHAIN_NOTEXIST\n\t\t\ttmpTokenDetail.Balance = \"\"\n\t\t\tresp.Bcs = append(resp.Bcs, tmpTokenDetail)\n\t\t\tcontinue\n\t\t}\n\t\tbalance, err := handle.GetFrozenBalance(req.Address)\n\t\tif err != nil {\n\t\t\ttmpTokenDetail.Error = pb.XChainErrorEnum_UNKNOW_ERROR\n\t\t\ttmpTokenDetail.Balance = \"\"\n\t\t} else {\n\t\t\ttmpTokenDetail.Error = pb.XChainErrorEnum_SUCCESS\n\t\t\ttmpTokenDetail.Balance = balance\n\t\t}\n\t\tresp.Bcs = append(resp.Bcs, tmpTokenDetail)\n\t}\n\tresp.Address = req.GetAddress()\n\n\trctx.GetLog().SetInfoField(\"account\", req.GetAddress())\n\treturn resp, nil\n}",
"func (_AdapterRegistry *AdapterRegistrySession) GetFullTokenBalance(tokenType string, token common.Address) (FullTokenBalance, error) {\n\treturn _AdapterRegistry.Contract.GetFullTokenBalance(&_AdapterRegistry.CallOpts, tokenType, token)\n}",
"func (c *Client) GetAddressBalance(address string) (balance Balance, err error) {\n\tr, err := c.request(\"getaddressbalance\", []interface{}{address})\n\tif err = c.error(err, &r); err != nil {\n\t\treturn\n\t}\n\n\tvar b map[string]interface{}\n\terr = json.Unmarshal(r.Result, &b)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Cast strings to float64\n\tif b[\"unconfirmed\"] != nil {\n\t\tif v, err := strconv.ParseFloat(b[\"unconfirmed\"].(string), 64); err == nil {\n\t\t\tbalance.Unconfirmed = v\n\t\t}\n\t}\n\n\tif b[\"confirmed\"] != nil {\n\t\tif v, err := strconv.ParseFloat(b[\"confirmed\"].(string), 64); err == nil {\n\t\t\tbalance.Confirmed = v\n\t\t}\n\t}\n\n\treturn\n}",
"func (c *Client) Balance(ctx context.Context, a swarm.Address) (resp Balance, err error) {\n\tb, err := c.debug.Node.Balance(ctx, a)\n\tif err != nil {\n\t\treturn Balance{}, fmt.Errorf(\"get balance with node %s: %w\", a.String(), err)\n\t}\n\n\treturn Balance{\n\t\tBalance: b.Balance.Int64(),\n\t\tPeer: b.Peer,\n\t}, nil\n}",
"func (_AdapterRegistry *AdapterRegistryCallerSession) GetFullTokenBalance(tokenType string, token common.Address) (FullTokenBalance, error) {\n\treturn _AdapterRegistry.Contract.GetFullTokenBalance(&_AdapterRegistry.CallOpts, tokenType, token)\n}",
"func GetTokenByAddress(address string, resp *AccountResponse) (Token, error) {\n\tfor _, token := range resp.Accounts[0].Tokens {\n\t\tif token.Address == address {\n\t\t\treturn token, nil\n\t\t}\n\t}\n\treturn Token{}, errors.New(\"token not found\")\n}",
"func (st *StateTracker) Balance(addr address.Address) abi.TokenAmount {\n\treturn st.Header(addr).Balance\n}",
"func (r *API) getBalance() (balances []*AccountTokenBalanceVo, err error) {\n\tchannels, err := r.GetChannelList(utils.EmptyAddress, utils.EmptyAddress)\n\tif err != nil {\n\t\treturn\n\t}\n\ttoken2ChannelMap := make(map[common.Address][]*channeltype.Serialization)\n\tfor _, channel := range channels {\n\t\ttoken2ChannelMap[channel.TokenAddress()] = append(token2ChannelMap[channel.TokenAddress()], channel)\n\t}\n\tfor tokenAddress, channels := range token2ChannelMap {\n\t\tbalance := &AccountTokenBalanceVo{\n\t\t\tTokenAddress: tokenAddress.String(),\n\t\t\tBalance: big.NewInt(0),\n\t\t\tLockedAmount: big.NewInt(0),\n\t\t}\n\t\tfor _, channel := range channels {\n\t\t\tbalance.Balance.Add(balance.Balance, channel.OurBalance())\n\t\t\tbalance.LockedAmount.Add(balance.LockedAmount, channel.OurAmountLocked())\n\t\t}\n\t\tbalances = append(balances, balance)\n\t}\n\treturn\n}",
"func (t *RpcServ) GetBalanceDetail(gctx context.Context, req *pb.AddressBalanceStatus) (*pb.AddressBalanceStatus, error) {\n\t// 默认响应\n\tresp := &pb.AddressBalanceStatus{\n\t\tTfds: make([]*pb.TokenFrozenDetails, 0),\n\t}\n\t// 获取请求上下文,对内传递rctx\n\trctx := sctx.ValueReqCtx(gctx)\n\n\tif req == nil || req.GetAddress() == \"\" {\n\t\trctx.GetLog().Warn(\"param error,some param unset\")\n\t\treturn resp, ecom.ErrParameter\n\t}\n\n\tfor i := 0; i < len(req.Tfds); i++ {\n\t\ttmpFrozenDetails := &pb.TokenFrozenDetails{\n\t\t\tBcname: req.Tfds[i].Bcname,\n\t\t}\n\t\thandle, err := models.NewChainHandle(req.Tfds[i].Bcname, rctx)\n\t\tif err != nil {\n\t\t\ttmpFrozenDetails.Error = pb.XChainErrorEnum_BLOCKCHAIN_NOTEXIST\n\t\t\ttmpFrozenDetails.Tfd = nil\n\t\t\tresp.Tfds = append(resp.Tfds, tmpFrozenDetails)\n\t\t\tcontinue\n\t\t}\n\t\ttfd, err := handle.GetBalanceDetail(req.GetAddress())\n\t\tif err != nil {\n\t\t\ttmpFrozenDetails.Error = pb.XChainErrorEnum_UNKNOW_ERROR\n\t\t\ttmpFrozenDetails.Tfd = nil\n\t\t} else {\n\t\t\txchainTfd, err := acom.BalanceDetailsToXchain(tfd)\n\t\t\tif err != nil {\n\t\t\t\ttmpFrozenDetails.Error = pb.XChainErrorEnum_UNKNOW_ERROR\n\t\t\t\ttmpFrozenDetails.Tfd = nil\n\t\t\t}\n\t\t\ttmpFrozenDetails.Error = pb.XChainErrorEnum_SUCCESS\n\t\t\ttmpFrozenDetails.Tfd = xchainTfd\n\t\t}\n\t\tresp.Tfds = append(resp.Tfds, tmpFrozenDetails)\n\t}\n\tresp.Address = req.GetAddress()\n\n\trctx.GetLog().SetInfoField(\"account\", req.GetAddress())\n\treturn resp, nil\n}",
"func (c *Client) Balance(addr string) (*big.Int, error) {\n\treturn c.balance(context.Background(), addr)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Increment the total time in microseconds based on the elapsed time | func (ph *PasswordHash) incrementTime(start time.Time) {
elapsed := time.Since(start)
ph.totalTimeMicro = ph.totalTimeMicro + elapsed.Microseconds()
} | [
"func advanceCounterTime(t time.Duration) {\n\tatomic.AddInt64(&ticks, t.Nanoseconds())\n}",
"func (time_ TimeVal) Add(microseconds int) {\n\tC.g_time_val_add(time_.native(), C.glong(microseconds))\n}",
"func (s *Stats) incTimeout() {\n\ts.totalCount++\n}",
"func AddGigasecond(inputTime time.Time) time.Time {\n\t return inputTime.Add(time.Duration(1000000000) * time.Second)\n}",
"func (c *Clock) Increment() {\n\tatomic.AddInt64(&c.times, 1)\n}",
"func (timer *BenchMark) TimeElapsed() int64 {\n\treturn time.Since(timer.time).Nanoseconds()\n}",
"func increment(t time.Time, x int) *time.Time {\n\ti := t.Add(time.Minute * time.Duration(x))\n\treturn &i\n}",
"func (p *Point) RecordTotalTime() {\n\tp.OverallDuration = time.Since(p.OverallStart)\n}",
"func (this *MockClock) NotifyTimeElapsed(d time.Duration) {\n\tthis.now = this.Now().Add(d)\n}",
"func AddGigasecond(t time.Time) time.Time {\n\tgigaSecond := time.Second * time.Duration(int64(math.Pow(10, 9)))\n\treturn t.Add(gigaSecond)\n}",
"func (t *Timer) AddCount(diff int64) {\n\tC.al_add_timer_count((*C.ALLEGRO_TIMER)(t), C.int64_t(diff))\n}",
"func (t *Timer) Inc() {\n\tt.N++\n\tif isCheckpoint(t.N) {\n\t\tt.print()\n\t}\n}",
"func (ctx *BuildContext) AccumulatedTime(label TimerLabel) time.Duration {\n\tif ctx.timerEnabled {\n\t\treturn ctx.accTime[label]\n\t}\n\treturn time.Duration(0)\n}",
"func (r *report) updateElapsedTime() {\n\tr.ElapsedTime = time.Now().Sub(memory.StartTime).String()\n}",
"func TimeElapsed(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tlog.Printf(\"%s took %v\", name, elapsed)\n}",
"func (m *MockClock) AddTime(d time.Duration) {\n\tm.cond.L.Lock()\n\tdefer m.cond.L.Unlock()\n\n\tassertFuture(m.now, m.now.Add(d))\n\tm.now = m.now.Add(d)\n\tm.cond.Broadcast()\n}",
"func (u UTCTimestamp) Add(dur time.Duration) UTCTimestamp {\n\treturn u + UTCTimestamp(dur)\n}",
"func (_m *MetricCollector) UpdateTotalDuration(timeSinceStart time.Duration) {\n\t_m.Called(timeSinceStart)\n}",
"func TimeElapsed(key string) {\n\telapsed := currentTime.getTimeSince(key)\n\tDebug(\"%s took %s\", key, elapsed)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calcEtag will create an etag based on the md5 of mtime, inode (if available), device (if available) and size. errors are logged, but an etag will still be returned | func calcEtag(ctx context.Context, fi os.FileInfo) string {
log := appctx.GetLogger(ctx)
h := md5.New()
err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano())
if err != nil {
log.Error().Err(err).Msg("error writing mtime")
}
// device and inode have no meaning on windows
err = binary.Write(h, binary.BigEndian, fi.Size())
if err != nil {
log.Error().Err(err).Msg("error writing size")
}
etag := fmt.Sprintf(`"%x"`, h.Sum(nil))
return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\""))
} | [
"func ComputeEtag(path string) (string, error) {\n\tvar result []byte\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer Close(file)\n\n\thash := md5.New()\n\tif _, err := io.Copy(hash, file); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hex.EncodeToString(hash.Sum(result)), nil\n}",
"func etag( d []byte ) string {\n\thash := md5.Sum( d )\n\treturn hex.EncodeToString(hash[:])\n}",
"func generateETag(fi os.FileInfo) string {\n\tetag := fmt.Sprint(fi.Size()) + fi.Name() + fi.ModTime().UTC().Format(http.TimeFormat)\n\treturn base64.StdEncoding.EncodeToString([]byte(etag))\n}",
"func (x *Cache) GetEtag(path string) (string, error) {\n\tetagFile := getEtagFile(path)\n\t_, err := os.Stat(etagFile)\n\tif os.IsNotExist(err) {\n\t\treturn \"\", nil\n\t}\n\tbuf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(buf), nil\n}",
"func (f fileinfo) ETag() string {\n\treturn f.etag\n}",
"func (o *NoStateMetaData) GetEtag() *string {\n\tif o == nil {\n\t\treturn nil\n\t}\n\n\treturn o.Etag\n}",
"func (o DicomStoreIamPolicyOutput) Etag() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DicomStoreIamPolicy) pulumi.StringOutput { return v.Etag }).(pulumi.StringOutput)\n}",
"func (f *File) updateEtagAndLastModified(headers http.Header) {\n\tf.Properties.Etag = headers.Get(\"Etag\")\n\tf.Properties.LastModified = headers.Get(\"Last-Modified\")\n}",
"func (o *NoStateMetaData) GetEtagOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Etag, true\n}",
"func scanETag(s string) (etag string, remain string) {\n\ts = textproto.TrimString(s)\n\tstart := 0\n\tif strings.HasPrefix(s, \"W/\") {\n\t\tstart = 2\n\t}\n\tif len(s[start:]) < 2 || s[start] != '\"' {\n\t\treturn \"\", \"\"\n\t}\n\t// ETag is either W/\"text\" or \"text\".\n\t// See RFC 7232 2.3.\n\tfor i := start + 1; i < len(s); i++ {\n\t\tc := s[i]\n\t\tswitch {\n\t\t// Character values allowed in ETags.\n\t\tcase c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80:\n\t\tcase c == '\"':\n\t\t\treturn string(s[:i+1]), s[i+1:]\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t}\n\treturn \"\", \"\"\n}",
"func (h *Handler) SetETag(val interface{}) {\n\tvar str string\n\tswitch val.(type) {\n\tcase string:\n\t\tstr = val.(string)\n\tcase time.Time:\n\t\tstr = val.(time.Time).Format(time.RFC1123)\n\tcase fmt.Stringer:\n\t\tstr = val.(fmt.Stringer).String()\n\tdefault:\n\t\tstr = fmt.Sprintf(\"%v\", val)\n\t}\n\n\thash := md5.New()\n\tio.WriteString(hash, str)\n\tetag := base64.StdEncoding.EncodeToString(hash.Sum(nil))\n\th.Res.Header().Set(\"ETag\", etag)\n}",
"func (i *item) ETag() (string, error) {\n\treturn *(i.properties.ETag), nil\n}",
"func (e Entry) ItemETag() string { return e.ETag }",
"func (d Document) ETag() (ETag, error) {\n\th := fnv.New64a()\n\terr := encodeDocument(h, d, true)\n\treturn ETag(h.Sum64()), err\n}",
"func BuildETag(name string, data []byte) string {\n\tcrc := crc32.ChecksumIEEE(data)\n\treturn fmt.Sprintf(`\"%s-%d-%08X\"`, name, len(data), crc)\n}",
"func (o EkmConnectionOutput) Etag() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EkmConnection) pulumi.StringOutput { return v.Etag }).(pulumi.StringOutput)\n}",
"func HandleEtagCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (handled bool) {\n\tetag := generateETag(fi)\n\tif req.Header.Get(\"If-None-Match\") == etag {\n\t\tw.WriteHeader(http.StatusNotModified)\n\t\treturn true\n\t}\n\n\tw.Header().Set(\"Cache-Control\", GetCacheControl())\n\tw.Header().Set(\"ETag\", etag)\n\treturn false\n}",
"func (o TableIamPolicyOutput) Etag() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *TableIamPolicy) pulumi.StringOutput { return v.Etag }).(pulumi.StringOutput)\n}",
"func ETag(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method == \"GET\" {\n\t\t\twr := newETagWriter(w, r)\n\t\t\th.ServeHTTP(wr, r)\n\t\t\twr.end()\n\t\t} else {\n\t\t\th.ServeHTTP(w, r)\n\t\t}\n\t})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AttachProgram attaches a BPF program from a file to the TC attach point | func AttachProgram(attachPoint AttachPoint, hostIP net.IP) error {
// FIXME we use this lock so that two copies of tc running in parallel don't re-use the same jump map.
// This can happen if tc incorrectly decides the two programs are identical (when in fact they differ by attach
// point).
log.Debug("AttachProgram waiting for lock...")
tcLock.Lock()
defer tcLock.Unlock()
log.Debug("AttachProgram got lock.")
// Work around tc map name collision: when we load two identical BPF programs onto different interfaces, tc
// pins object-local maps to a namespace based on the hash of the BPF program, which is the same for both
// interfaces. Since we want one map per interface instead, we search for such maps and rename them before we
// release the tc lock.
//
// For our purposes, it should work to simply delete the map. However, when we tried that, the contents of the
// map get deleted even though it is in use by a BPF program.
defer repinJumpMaps()
tempDir, err := ioutil.TempDir("", "calico-tc")
if err != nil {
return errors.Wrap(err, "failed to create temporary directory")
}
defer func() {
_ = os.RemoveAll(tempDir)
}()
preCompiledBinary := path.Join(bpf.ObjectDir, attachPoint.Filename)
tempBinary := path.Join(tempDir, attachPoint.Filename)
exeData, err := ioutil.ReadFile(preCompiledBinary)
if err != nil {
return errors.Wrap(err, "failed to read pre-compiled BPF binary")
}
hostIP = hostIP.To4()
if len(hostIP) == 4 {
log.WithField("ip", hostIP).Debug("Patching in host IP")
replacement := make([]byte, 6)
copy(replacement[2:], hostIP)
exeData = bytes.ReplaceAll(exeData, []byte("\x00\x00HOST"), replacement)
}
// Patch in the log prefix; since this gets loaded as immediate values by the compiler, we know it'll be
// preceded by a 2-byte 0 offset so we include that in the match.
iface := []byte(attachPoint.Iface + "--------") // Pad on the right to make sure its long enough.
logBytes := make([]byte, 6)
copy(logBytes[2:], iface)
exeData = bytes.ReplaceAll(exeData, []byte("\x00\x00CALI"), logBytes)
copy(logBytes[2:], iface[4:8])
exeData = bytes.ReplaceAll(exeData, []byte("\x00\x00COLO"), logBytes)
err = ioutil.WriteFile(tempBinary, exeData, 0600)
if err != nil {
return errors.Wrap(err, "failed to write patched BPF binary")
}
tcCmd := exec.Command("tc",
"filter", "add", "dev", attachPoint.Iface,
string(attachPoint.Hook),
"bpf", "da", "obj", tempBinary,
"sec", attachPoint.Section)
out, err := tcCmd.Output()
if err != nil {
if strings.Contains(err.Error(), "Cannot find device") {
// Avoid a big, spammy log when the issue is that the interface isn't present.
log.WithField("iface", attachPoint.Iface).Info(
"Failed to attach BPF program; interface not found. Will retry if it show up.")
return nil
}
log.WithError(err).WithFields(log.Fields{"out": string(out)}).
WithField("command", tcCmd).Error("Failed to attach BPF program")
if err, ok := err.(*exec.ExitError); ok {
// ExitError is really unhelpful dumped to the log, swap it for a custom one.
return ErrAttachFailed{
ExitCode: err.ExitCode(),
Stderr: string(err.Stderr),
}
}
return errors.Wrap(err, "failed to attach TC program")
}
return nil
} | [
"func attachProgram(Ifindex int, program *ebpf.Program) error {\n\tlink, err := netlink.LinkByIndex(Ifindex)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn netlink.LinkSetXdpFdWithFlags(link, program.FD(), int(DefaultXdpFlags))\n}",
"func bpftoolMapAttach(progID string, mapID string) error {\n\tprog := \"bpftool\"\n\targs := []string{\"prog\", \"attach\", \"id\", progID, \"msg_verdict\", \"id\", mapID}\n\tout, err := exec.Command(prog, args...).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to attach prog(%s) to map(%s): %s: %s\", progID, mapID, err, out)\n\t}\n\treturn nil\n}",
"func bpftoolAttach(bpfObject string) error {\n\tprog := \"bpftool\"\n\tbpffs := filepath.Join(bpf.GetMapRoot(), bpfObject)\n\tcgrp := cgroup.GetCgroupRoot()\n\targs := []string{\"cgroup\", \"attach\", cgrp, \"sock_ops\", \"pinned\", bpffs}\n\tout, err := exec.Command(prog, args...).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to attach %s: %s: %s\", bpfObject, err, out)\n\t}\n\treturn nil\n}",
"func (p *libBPFGoBPFProgram) attachTracepoint(tracepoint string) error {\n\t_, err := p.program.AttachTracepoint(tracepoint)\n\treturn err\n}",
"func (p *Program) Attach(Ifindex int) error {\n\tif err := removeProgram(Ifindex); err != nil {\n\t\treturn err\n\t}\n\treturn attachProgram(Ifindex, p.Program)\n}",
"func (p *profiler) AttachBPF(fd int) error {\n\treturn unix.IoctlSetInt(p.fd, unix.PERF_EVENT_IOC_SET_BPF, fd)\n}",
"func (l *Loopback) Attach(file *os.File) error {\n\tf, err := os.OpenFile(l.Name, os.O_RDWR, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.device = f\n\tif _, _, e1 := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.LOOP_SET_FD, file.Fd()); e1 != 0 {\n\t\treturn fmt.Errorf(\"Attach file to %q failed: %v\", l.Name, e1.Error())\n\t}\n\treturn nil\n}",
"func (loop *Device) AttachFromFile(image *os.File, mode int, number *int) error {\n\treturn fmt.Errorf(\"unsupported on this platform\")\n}",
"func LoadAttachCgroupDeviceFilter(insts asm.Instructions, license string, dirFd int) (func() error, error) {\n\t// Increase `ulimit -l` limit to avoid BPF_PROG_LOAD error (#2167).\n\t// This limit is not inherited into the container.\n\tmemlockLimit := &unix.Rlimit{\n\t\tCur: unix.RLIM_INFINITY,\n\t\tMax: unix.RLIM_INFINITY,\n\t}\n\t_ = unix.Setrlimit(unix.RLIMIT_MEMLOCK, memlockLimit)\n\n\t// Get the list of existing programs.\n\toldProgs, err := findAttachedCgroupDeviceFilters(dirFd)\n\tif err != nil {\n\t\treturn nilCloser, err\n\t}\n\tuseReplaceProg := haveBpfProgReplace() && len(oldProgs) == 1\n\n\t// Generate new program.\n\tspec := &ebpf.ProgramSpec{\n\t\tType: ebpf.CGroupDevice,\n\t\tInstructions: insts,\n\t\tLicense: license,\n\t}\n\tprog, err := ebpf.NewProgram(spec)\n\tif err != nil {\n\t\treturn nilCloser, err\n\t}\n\n\t// If there is only one old program, we can just replace it directly.\n\tvar (\n\t\treplaceProg *ebpf.Program\n\t\tattachFlags uint32 = unix.BPF_F_ALLOW_MULTI\n\t)\n\tif useReplaceProg {\n\t\treplaceProg = oldProgs[0]\n\t\tattachFlags |= unix.BPF_F_REPLACE\n\t}\n\terr = link.RawAttachProgram(link.RawAttachProgramOptions{\n\t\tTarget: dirFd,\n\t\tProgram: prog,\n\t\tReplace: replaceProg,\n\t\tAttach: ebpf.AttachCGroupDevice,\n\t\tFlags: attachFlags,\n\t})\n\tif err != nil {\n\t\treturn nilCloser, fmt.Errorf(\"failed to call BPF_PROG_ATTACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI): %w\", err)\n\t}\n\tcloser := func() error {\n\t\terr = link.RawDetachProgram(link.RawDetachProgramOptions{\n\t\t\tTarget: dirFd,\n\t\t\tProgram: prog,\n\t\t\tAttach: ebpf.AttachCGroupDevice,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE): %w\", err)\n\t\t}\n\t\t// TODO: Should we attach the old filters back in this case? 
Otherwise\n\t\t// we fail-open on a security feature, which is a bit scary.\n\t\treturn nil\n\t}\n\tif !useReplaceProg {\n\t\tlogLevel := logrus.DebugLevel\n\t\t// If there was more than one old program, give a warning (since this\n\t\t// really shouldn't happen with runc-managed cgroups) and then detach\n\t\t// all the old programs.\n\t\tif len(oldProgs) > 1 {\n\t\t\t// NOTE: Ideally this should be a warning but it turns out that\n\t\t\t// systemd-managed cgroups trigger this warning (apparently\n\t\t\t// systemd doesn't delete old non-systemd programs when\n\t\t\t// setting properties).\n\t\t\tlogrus.Infof(\"found more than one filter (%d) attached to a cgroup -- removing extra filters!\", len(oldProgs))\n\t\t\tlogLevel = logrus.InfoLevel\n\t\t}\n\t\tfor idx, oldProg := range oldProgs {\n\t\t\t// Output some extra debug info.\n\t\t\tif info, err := oldProg.Info(); err == nil {\n\t\t\t\tfields := logrus.Fields{\n\t\t\t\t\t\"type\": info.Type.String(),\n\t\t\t\t\t\"tag\": info.Tag,\n\t\t\t\t\t\"name\": info.Name,\n\t\t\t\t}\n\t\t\t\tif id, ok := info.ID(); ok {\n\t\t\t\t\tfields[\"id\"] = id\n\t\t\t\t}\n\t\t\t\tif runCount, ok := info.RunCount(); ok {\n\t\t\t\t\tfields[\"run_count\"] = runCount\n\t\t\t\t}\n\t\t\t\tif runtime, ok := info.Runtime(); ok {\n\t\t\t\t\tfields[\"runtime\"] = runtime.String()\n\t\t\t\t}\n\t\t\t\tlogrus.WithFields(fields).Logf(logLevel, \"removing old filter %d from cgroup\", idx)\n\t\t\t}\n\t\t\terr = link.RawDetachProgram(link.RawDetachProgramOptions{\n\t\t\t\tTarget: dirFd,\n\t\t\t\tProgram: oldProg,\n\t\t\t\tAttach: ebpf.AttachCGroupDevice,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn closer, fmt.Errorf(\"failed to call BPF_PROG_DETACH (BPF_CGROUP_DEVICE) on old filter program: %w\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn closer, nil\n}",
"func (o *Obj) AttachClassifier(secName, ifName string, ingress bool) (int, int, int, error) {\n\tcSecName := C.CString(secName)\n\tcIfName := C.CString(ifName)\n\tdefer C.free(unsafe.Pointer(cSecName))\n\tdefer C.free(unsafe.Pointer(cIfName))\n\tifIndex, err := C.if_nametoindex(cIfName)\n\tif err != nil {\n\t\treturn -1, -1, -1, err\n\t}\n\n\tret, err := C.bpf_tc_program_attach(o.obj, cSecName, C.int(ifIndex), C.bool(ingress))\n\tif err != nil {\n\t\treturn -1, -1, -1, fmt.Errorf(\"error attaching tc program %w\", err)\n\t}\n\n\treturn int(ret.prog_id), int(ret.priority), int(ret.handle), nil\n}",
"func main() {\n\tstopper := make(chan os.Signal, 1)\n\tsignal.Notify(stopper, os.Interrupt, syscall.SIGTERM)\n\n\t// Allow the current process to lock memory for eBPF resources.\n\tif err := rlimit.RemoveMemlock(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Load pre-compiled programs and maps into the kernel.\n\tobjs := bpfObjects{}\n\tif err := loadBpfObjects(&objs, nil); err != nil {\n\t\tlog.Fatalf(\"loading objects: %s\", err)\n\t}\n\tdefer objs.Close()\n\n\t//SEC(\"tracepoint/syscalls/sys_enter_execve\")\n\t// attach to xxx\n\tkp, err := link.Tracepoint(\"syscalls\", \"sys_enter_execve\", objs.BpfProg, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"opening tracepoint: %s\", err)\n\t}\n\tdefer kp.Close()\n\n\tlog.Printf(\"Successfully started! Please run \\\"sudo cat /sys/kernel/debug/tracing/trace_pipe\\\" to see output of the BPF programs\\n\")\n\n\t// Wait for a signal and close the perf reader,\n\t// which will interrupt rd.Read() and make the program exit.\n\t<-stopper\n\tlog.Println(\"Received signal, exiting program..\")\n}",
"func (s *BasetinyListener) EnterProgram(ctx *ProgramContext) {}",
"func (m *Manifest) AddUserProgram(imgpath string) {\n\n\tparts := strings.Split(imgpath, \"/\")\n\tif parts[0] == \".\" {\n\t\tparts = parts[1:]\n\t}\n\tm.program = path.Join(\"/\", path.Join(parts...))\n\tm.AddFile(m.program, imgpath)\n}",
"func (s *BasetinybasicListener) EnterProgram(ctx *ProgramContext) {}",
"func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {\n\tif err := haveBPFLink(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif opts.Target < 0 {\n\t\treturn nil, fmt.Errorf(\"invalid target: %s\", sys.ErrClosedFd)\n\t}\n\n\tprogFd := opts.Program.FD()\n\tif progFd < 0 {\n\t\treturn nil, fmt.Errorf(\"invalid program: %s\", sys.ErrClosedFd)\n\t}\n\n\tattr := sys.LinkCreateAttr{\n\t\tTargetFd: uint32(opts.Target),\n\t\tProgFd: uint32(progFd),\n\t\tAttachType: sys.AttachType(opts.Attach),\n\t\tTargetBtfId: uint32(opts.BTF),\n\t\tFlags: opts.Flags,\n\t}\n\tfd, err := sys.LinkCreate(&attr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't create link: %s\", err)\n\t}\n\n\treturn &RawLink{fd, \"\"}, nil\n}",
"func TestAttachStopOnEntry(t *testing.T) {\n\trunTest(t, \"loopprog\", func(client *daptest.Client, fixture protest.Fixture) {\n\t\t// Start the program to attach to\n\t\tcmd := exec.Command(fixture.Path)\n\t\tstdout, err := cmd.StdoutPipe()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcmd.Stderr = os.Stderr\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t// Wait for output.\n\t\t// This will give the target process time to initialize the runtime before we attach,\n\t\t// so we can rely on having goroutines when they are requested on attach.\n\t\tscanOut := bufio.NewScanner(stdout)\n\t\tscanOut.Scan()\n\t\tif scanOut.Text() != \"past main\" {\n\t\t\tt.Errorf(\"expected loopprog.go to output \\\"past main\\\"\")\n\t\t}\n\n\t\t// 1 >> initialize, << initialize\n\t\tclient.InitializeRequest()\n\t\tinitResp := client.ExpectInitializeResponseAndCapabilities(t)\n\t\tif initResp.Seq != 0 || initResp.RequestSeq != 1 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=1\", initResp)\n\t\t}\n\n\t\t// 2 >> attach, << initialized, << attach\n\t\tclient.AttachRequest(\n\t\t\tmap[string]interface{}{\"mode\": \"local\", \"processId\": cmd.Process.Pid, \"stopOnEntry\": true, \"backend\": \"default\"})\n\t\tclient.ExpectCapabilitiesEventSupportTerminateDebuggee(t)\n\t\tinitEvent := client.ExpectInitializedEvent(t)\n\t\tif initEvent.Seq != 0 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0\", initEvent)\n\t\t}\n\t\tattachResp := client.ExpectAttachResponse(t)\n\t\tif attachResp.Seq != 0 || attachResp.RequestSeq != 2 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=2\", attachResp)\n\t\t}\n\n\t\t// 3 >> setBreakpoints, << setBreakpoints\n\t\tclient.SetBreakpointsRequest(fixture.Source, nil)\n\t\tsbpResp := client.ExpectSetBreakpointsResponse(t)\n\t\tif sbpResp.Seq != 0 || sbpResp.RequestSeq != 3 || len(sbpResp.Body.Breakpoints) != 0 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=3, len(Breakpoints)=0\", sbpResp)\n\t\t}\n\n\t\t// 4 >> setExceptionBreakpoints, << setExceptionBreakpoints\n\t\tclient.SetExceptionBreakpointsRequest()\n\t\tsebpResp := client.ExpectSetExceptionBreakpointsResponse(t)\n\t\tif sebpResp.Seq != 0 || sebpResp.RequestSeq != 4 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=4\", sebpResp)\n\t\t}\n\n\t\t// 5 >> configurationDone, << stopped, << configurationDone\n\t\tclient.ConfigurationDoneRequest()\n\t\tstopEvent := client.ExpectStoppedEvent(t)\n\t\tif stopEvent.Seq != 0 ||\n\t\t\tstopEvent.Body.Reason != \"entry\" ||\n\t\t\tstopEvent.Body.ThreadId != 1 ||\n\t\t\t!stopEvent.Body.AllThreadsStopped {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, Body={Reason=\\\"entry\\\", ThreadId=1, AllThreadsStopped=true}\", stopEvent)\n\t\t}\n\t\tcdResp := client.ExpectConfigurationDoneResponse(t)\n\t\tif cdResp.Seq != 0 || cdResp.RequestSeq != 5 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=5\", cdResp)\n\t\t}\n\n\t\t// 6 >> threads, << threads\n\t\tclient.ThreadsRequest()\n\t\ttResp := client.ExpectThreadsResponse(t)\n\t\t// Expect main goroutine plus runtime at this point.\n\t\tif tResp.Seq != 0 || tResp.RequestSeq != 6 || len(tResp.Body.Threads) < 2 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=6 len(Threads)>1\", tResp)\n\t\t}\n\n\t\t// 7 >> threads, << threads\n\t\tclient.ThreadsRequest()\n\t\tclient.ExpectThreadsResponse(t)\n\n\t\t// 8 >> stackTrace, << response\n\t\tclient.StackTraceRequest(1, 0, 20)\n\t\tclient.ExpectStackTraceResponse(t)\n\n\t\t// 9 >> stackTrace, << response\n\t\tclient.StackTraceRequest(1, 0, 
20)\n\t\tclient.ExpectStackTraceResponse(t)\n\n\t\t// 10 >> evaluate, << error\n\t\tclient.EvaluateRequest(\"foo\", 0 /*no frame specified*/, \"repl\")\n\t\terResp := client.ExpectInvisibleErrorResponse(t)\n\t\tif erResp.Seq != 0 || erResp.RequestSeq != 10 || !checkErrorMessageId(erResp.Body.Error, UnableToEvaluateExpression) {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=10 Id=%d\", erResp, UnableToEvaluateExpression)\n\t\t}\n\n\t\t// 11 >> evaluate, << evaluate\n\t\tclient.EvaluateRequest(\"1+1\", 0 /*no frame specified*/, \"repl\")\n\t\tevResp := client.ExpectEvaluateResponse(t)\n\t\tif evResp.Seq != 0 || evResp.RequestSeq != 11 || evResp.Body.Result != \"2\" {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=10 Result=2\", evResp)\n\t\t}\n\n\t\t// 12 >> continue, << continue\n\t\tclient.ContinueRequest(1)\n\t\tcResp := client.ExpectContinueResponse(t)\n\t\tif cResp.Seq != 0 || cResp.RequestSeq != 12 {\n\t\t\tt.Errorf(\"\\ngot %#v\\nwant Seq=0, RequestSeq=12\", cResp)\n\t\t}\n\n\t\t// TODO(polina): once https://github.com/go-delve/delve/issues/2259 is\n\t\t// fixed, test with kill=false.\n\n\t\t// 13 >> disconnect, << disconnect\n\t\tclient.DisconnectRequestWithKillOption(true)\n\n\t\t// Disconnect consists of Halt + Detach.\n\t\t// Halt interrupts command in progress, which triggers\n\t\t// a stopped event in parallel with the disconnect\n\t\t// sequence. It might arrive before or during the sequence\n\t\t// or never if the server exits before it is sent.\n\t\tmsg := expectMessageFilterStopped(t, client)\n\t\tclient.CheckOutputEvent(t, msg)\n\t\tmsg = expectMessageFilterStopped(t, client)\n\t\tclient.CheckDisconnectResponse(t, msg)\n\t\tclient.ExpectTerminatedEvent(t)\n\n\t\t// If this call to KeepAlive isn't here there's a chance that stdout will\n\t\t// be garbage collected (since it is no longer alive long before this\n\t\t// point), when that happens, on unix-like OSes, the read end of the pipe\n\t\t// will be closed by the finalizer and the target process will die by\n\t\t// SIGPIPE, which the rest of this test does not expect.\n\t\truntime.KeepAlive(stdout)\n\t})\n}",
"func RunDriveAttach(c *CmdConfig) error {\n\tfn := func(das do.DriveActionsService) (*do.Action, error) {\n\t\tif len(c.Args) != 2 {\n\t\t\treturn nil, doctl.NewMissingArgsErr(c.NS)\n\t\t}\n\t\tdriveID := c.Args[0]\n\t\tdropletID, err := strconv.Atoi(c.Args[1])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\n\t\t}\n\t\ta, err := das.Attach(driveID, dropletID)\n\t\treturn a, err\n\t}\n\treturn performDriveAction(c, fn)\n}",
"func LoadProgram(fname, funcname, qidmapname, xskmapname string) (*Program, error) {\n\tprog := new(Program)\n\tcol, err := ebpf.LoadCollection(fname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ok bool\n\tif prog.Program, ok = col.Programs[funcname]; !ok {\n\t\treturn nil, fmt.Errorf(\"%v doesn't contain a function named %v\", fname, funcname)\n\t}\n\tif prog.Queues, ok = col.Maps[qidmapname]; !ok {\n\t\treturn nil, fmt.Errorf(\"%v doesn't contain a queue map named %v\", fname, qidmapname)\n\t}\n\tif prog.Sockets, ok = col.Maps[xskmapname]; !ok {\n\t\treturn nil, fmt.Errorf(\"%v doesn't contain a socket map named %v\", fname, xskmapname)\n\t}\n\treturn prog, nil\n}",
"func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {\n\tif !c.ok() {\n\t\treturn errInvalidConn\n\t}\n\tso, ok := sockOpts[ssoAttachFilter]\n\tif !ok {\n\t\treturn errNotImplemented\n\t}\n\treturn so.setBPF(c.Conn, filter)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CleanUpJumpMaps scans for cali_jump maps that are still pinned to the filesystem but no longer referenced by our BPF programs. | func CleanUpJumpMaps() {
// So that we serialise with AttachProgram()
log.Debug("CleanUpJumpMaps waiting for lock...")
tcLock.Lock()
defer tcLock.Unlock()
log.Debug("CleanUpJumpMaps got lock, cleaning up...")
// Find the maps we care about by walking the BPF filesystem.
mapIDToPath := make(map[int]string)
err := filepath.Walk("/sys/fs/bpf/tc", func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if strings.HasPrefix(info.Name(), "cali_jump") {
log.WithField("path", p).Debug("Examining map")
out, err := exec.Command("bpftool", "map", "show", "pinned", p).Output()
if err != nil {
log.WithError(err).Panic("Failed to show map")
}
log.WithField("dump", string(out)).Debug("Map show before deletion")
idStr := string(bytes.Split(out, []byte(":"))[0])
id, err := strconv.Atoi(idStr)
if err != nil {
log.WithError(err).WithField("dump", string(out)).Error("Failed to parse bpftool output.")
return err
}
mapIDToPath[id] = p
}
return nil
})
if os.IsNotExist(err) {
log.WithError(err).Warn("tc directory missing from BPF file system?")
return
}
if err != nil {
log.WithError(err).Error("Error while looking for maps.")
}
// Find all the programs that are attached to interfaces.
out, err := exec.Command("bpftool", "net", "-j").Output()
if err != nil {
log.WithError(err).Panic("Failed to list attached bpf programs")
}
log.WithField("dump", string(out)).Debug("Attached BPF programs")
var attached []struct {
TC []struct {
DevName string `json:"devname"`
ID int `json:"id"`
} `json:"tc"`
}
err = json.Unmarshal(out, &attached)
if err != nil {
log.WithError(err).WithField("dump", string(out)).Error("Failed to parse list of attached BPF programs")
}
attachedProgs := set.New()
for _, prog := range attached[0].TC {
log.WithField("prog", prog).Debug("Adding prog to attached set")
attachedProgs.Add(prog.ID)
}
// Find all the maps that the attached programs refer to and remove them from consideration.
progsJSON, err := exec.Command("bpftool", "prog", "list", "--json").Output()
if err != nil {
log.WithError(err).Info("Failed to list BPF programs, assuming there's nothing to clean up.")
return
}
var progs []struct {
ID int `json:"id"`
Name string `json:"name"`
Maps []int `json:"map_ids"`
}
err = json.Unmarshal(progsJSON, &progs)
if err != nil {
log.WithError(err).Info("Failed to parse bpftool output. Assuming nothing to clean up.")
return
}
for _, p := range progs {
if !attachedProgs.Contains(p.ID) {
log.WithField("prog", p).Debug("Prog is not in the attached set, skipping")
continue
}
for _, id := range p.Maps {
log.WithField("mapID", id).WithField("prog", p).Debug("Map is still in use")
delete(mapIDToPath, id)
}
}
// Remove the pins.
for id, p := range mapIDToPath {
log.WithFields(log.Fields{"id": id, "path": p}).Debug("Removing stale BPF map pin.")
err := os.Remove(p)
if err != nil {
log.WithError(err).Warn("Removed stale BPF map pin.")
}
log.WithFields(log.Fields{"id": id, "path": p}).Info("Removed stale BPF map pin.")
}
// Look for empty dirs.
emptyAutoDirs := set.New()
err = filepath.Walk("/sys/fs/bpf/tc", func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && tcDirRegex.MatchString(info.Name()) {
p := path.Clean(p)
log.WithField("path", p).Debug("Found tc auto-created dir.")
emptyAutoDirs.Add(p)
} else {
dirPath := path.Clean(path.Dir(p))
log.WithField("path", dirPath).Debug("tc dir is not empty.")
emptyAutoDirs.Discard(dirPath)
}
return nil
})
if os.IsNotExist(err) {
log.WithError(err).Warn("tc directory missing from BPF file system?")
return
}
if err != nil {
log.WithError(err).Error("Error while looking for maps.")
}
emptyAutoDirs.Iter(func(item interface{}) error {
p := item.(string)
log.WithField("path", p).Debug("Removing empty dir.")
err := os.Remove(p)
if err != nil {
log.WithError(err).Error("Error while removing empty dir.")
}
return nil
})
} | [
"func CleanupPerClusterCTMaps(ipv4, ipv6 bool) error {\n\tmaps := NewPerClusterCTMaps(ipv4, ipv6)\n\treturn maps.cleanup()\n}",
"func cleanUpMaps(o *ORSet, key string) {\n\t// go through add and remove Maps and delete similar timestamps\n\trm, ok := o.removeMap[key]\n\tif ok {\n\t\tam, ok := o.addMap[key]\n\t\tif ok {\n\t\t\t// go through all timestamps in removeMap and delete in both maps\n\t\t\tfor ts, _ := range rm {\n\t\t\t\tdelete(am, ts)\n\t\t\t\tdelete(rm, ts)\n\t\t\t}\n\t\t}\n\t}\n\t// Remove empty map values\n\tif _, ok := o.removeMap[key]; !ok {\n\t\tdelete(o.removeMap, key)\n\t}\n\tif _, ok := o.addMap[key]; !ok {\n\t\tdelete(o.addMap, key)\n\t}\n}",
"func (msgCache *cgMsgCache) shutdownCleanupEntries() {\n\tfor id := range msgCache.msgMap {\n\t\tdelete(msgCache.msgMap, id)\n\t}\n}",
"func cleanConfigMaps(configMaps []*v1.ConfigMap) []*v1.ConfigMap {\n\tcleanedConfigMaps := make([]*v1.ConfigMap, 0, len(configMaps))\n\tfor _, cfgMap := range configMaps {\n\t\tif cfgMap.GetName() != \"kube-root-ca.crt\" {\n\t\t\tcleanedConfigMaps = append(cleanedConfigMaps, cfgMap)\n\t\t}\n\t}\n\treturn cleanedConfigMaps\n}",
"func (c *client) ClearMap(name string, forceDelete bool) error {\n\tif len(c.runtimes) == 0 {\n\t\treturn fmt.Errorf(\"no valid runtimes found\")\n\t}\n\tname, err := c.GetMapsPath(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif forceDelete {\n\t\tif err := os.Remove(name); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn native_errors.ErrNotFound\n\t\t\t}\n\t\t\treturn fmt.Errorf(strings.Join([]string{err.Error(), native_errors.ErrNotFound.Error()}, \" \"))\n\t\t}\n\t}\n\n\tvar lastErr error\n\tfor _, runtime := range c.runtimes {\n\t\terr := runtime.ClearMap(name)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t}\n\t}\n\tif lastErr != nil {\n\t\treturn lastErr\n\t}\n\treturn nil\n}",
"func (gmap *Map) Cleanup(world *World) error {\n\tif gmap.handlers.cleanupFunc != nil {\n\t\tgmap.handlers.cleanupFunc()\n\t}\n\n\tfor y := range gmap.tiles {\n\t\tfor x := range gmap.tiles[y] {\n\t\t\tfor z := range gmap.tiles[y][x] {\n\t\t\t\tfor _, o := range gmap.tiles[y][x][z].objects {\n\t\t\t\t\tworld.objectIDs.free(o.GetID())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (bl *BpfLoader) cleanDurMap() {\n\tif bl.cleanTimer == 0 {\n\t\tbl.cleanTimer = time.Now().Unix()\n\t\treturn\n\t}\n\tif (time.Now().Unix() - bl.cleanTimer) < 300 {\n\t\treturn\n\t}\n\tif v, ok := bl.eBPFMaps[bl.eBPFMapName[\"dur\"]]; ok {\n\t\thelpers.Debug(\"%s\", \"MapCleaner started.\")\n\t\tvar key struct {\n\t\t\tSrcIP uint32\n\t\t\tDstIP uint32\n\t\t\tSrcPort uint16\n\t\t\tDstPort uint16\n\t\t}\n\t\tvar val uint64\n\n\t\tcount := 0\n\t\tfor v.Iterate().Next(&key, &val) {\n\t\t\tif count > 1 {\n\t\t\t\t// Check if timestamp from map is older than 120s. In edge cases where\n\t\t\t\t// response is slow maybe because a lot of data is being sent in the\n\t\t\t\t// request we may end up cleaning usefull entries. These entries would\n\t\t\t\t// pile up because they are slow.\n\t\t\t\t// In the map val is in ns.\n\t\t\t\tif tnow, _ := helpers.GetMtime(); val <= (tnow - 120000000000) {\n\t\t\t\t\thelpers.Debug(\"MapCleaner detected more elements in map %s. Running cleanup for key with IPs: src:%d dst:%d; ports: src:%d dst:%d, with val: %d, tnow: %d\",\n\t\t\t\t\t\tbl.eBPFMapName[\"dur\"], helpers.Ntohl(key.SrcIP), helpers.Ntohl(key.DstIP), helpers.Ntohs(key.SrcPort), helpers.Ntohs(key.DstPort), val, tnow)\n\t\t\t\t\tif err := v.Delete(&key); err != nil {\n\t\t\t\t\t\tklog.Errorln(\"MapCleaner could not delete entry in map: \",\n\t\t\t\t\t\t\tbl.eBPFMapName[\"dur\"])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcount++\n\t\t}\n\n\t} else {\n\t\thelpers.Debug(\"MapCleaner not running because map %s is not loaded\", bl.eBPFMapName[\"dur\"])\n\t}\n\tbl.cleanTimer = time.Now().Unix()\n}",
"func (p *gceBuildletPool) cleanUpOldVMs() {\n\tif *mode == \"dev\" {\n\t\treturn\n\t}\n\tif computeService == nil {\n\t\treturn\n\t}\n\n\t// TODO(bradfitz): remove this list and just query it from the compute API?\n\t// http://godoc.org/google.golang.org/api/compute/v1#RegionsService.Get\n\t// and Region.Zones: http://godoc.org/google.golang.org/api/compute/v1#Region\n\n\tfor {\n\t\tfor _, zone := range buildEnv.ZonesToClean {\n\t\t\tif err := p.cleanZoneVMs(zone); err != nil {\n\t\t\t\tlog.Printf(\"Error cleaning VMs in zone %q: %v\", zone, err)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Minute)\n\t}\n}",
"func (m *Map) Reset() {\n\tfor hash, entry := range m.lookup {\n\t\tm.removeMapKey(hash, entry.key)\n\t}\n}",
"func (plugin *InterfaceConfigurator) clearMapping() error {\n\tplugin.swIfIndexes.Clear()\n\tplugin.dhcpIndexes.Clear()\n\tplugin.uIfaceCache = make(map[string]string)\n\tplugin.vxlanMulticastCache = make(map[string]*intf.Interfaces_Interface)\n\tplugin.memifScCache = make(map[string]uint32)\n\treturn nil\n}",
"func (s *BaseScssParserListener) ExitMap_(ctx *Map_Context) {}",
"func cleanPrevRemoteMap() {\n\tremoteMap.Range(func(key interface{}, value interface{}) bool {\n\t\tremoteMap.Delete(key)\n\t\treturn true\n\t})\n}",
"func (mr *Master) cleanupFiles() {\n\tfor i := range mr.files {\n\t\tfor j := 0; j < mr.nReduce; j++ {\n\t\t\tremoveFile(reduceName(mr.jobName, i, j))\n\t\t}\n\t}\n\tfor i := 0; i < mr.nReduce; i++ {\n\t\tremoveFile(mergeName(mr.jobName, i))\n\t}\n\tremoveFile(\"mrtmp.\" + mr.jobName)\n}",
"func (p Proxy) ClearMap() {\n\t// TODO clearPrefix\n\n\t// clear the length counter\n\tp.Delete()\n}",
"func clearPreBindings(virtualPlacement map[int32][]CellList) {\n\tfor _, podPlacements := range virtualPlacement {\n\t\tfor _, podGpus := range podPlacements {\n\t\t\tfor _, gpu := range podGpus {\n\t\t\t\tfor gpu != nil {\n\t\t\t\t\tvGpu := gpu.(*VirtualCell)\n\t\t\t\t\tif pGpu := vGpu.GetPreBoundPhysicalCell(); pGpu != nil {\n\t\t\t\t\t\tpGpu.SetPreBoundVirtualCell(nil)\n\t\t\t\t\t\tvGpu.SetPreBoundPhysicalCell(nil)\n\t\t\t\t\t\tgpu = gpu.GetParent()\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func mvUrlMaps(crawlUrl UrlInfo,pendCrawlCnt int,\n\t crawlStatMap map[string]CrawlStat,crawlListMap map[string]int) int {\n var ap CrawlStat\n\tap.Crawled = false; ap.Depth = crawlUrl.Depth; ap.Running = true\n\tap.Log = LogInfo{}\n\tcrawlStatMap[crawlUrl.UrlN] = ap\n\tpendCrawlCnt = pendCrawlCnt + 1\n\tdelete(crawlListMap,crawlUrl.UrlN)\n\treturn pendCrawlCnt\n}",
"func cleanupEntry(entry []UA_NodeMapEntry) {\n\tif int((int((UA_Boolean(entry[0].deleted))))) != 0 && int(uint16((uint16((uint16_t((UA_UInt16(entry[0].refCount)))))))) == 0 {\n\t\tdeleteEntry(entry)\n\t}\n}",
"func NewMapCleaner(emap *cebpf.Map, key, val interface{}) (*MapCleaner, error) {\n\t// we force types to be of pointer kind because of the reasons mentioned above\n\tif reflect.ValueOf(key).Kind() != reflect.Ptr {\n\t\treturn nil, fmt.Errorf(\"%T is not a pointer kind\", key)\n\t}\n\tif reflect.ValueOf(val).Kind() != reflect.Ptr {\n\t\treturn nil, fmt.Errorf(\"%T is not a pointer kind\", val)\n\t}\n\n\treturn &MapCleaner{\n\t\temap: emap,\n\t\tkey: key,\n\t\tval: val,\n\t\tkeyPtr: unsafe.Pointer(reflect.ValueOf(key).Elem().Addr().Pointer()),\n\t\tvalPtr: unsafe.Pointer(reflect.ValueOf(val).Elem().Addr().Pointer()),\n\t\tdone: make(chan struct{}),\n\t}, nil\n}",
"func CleanProjectMapping() {\n\tprojects := viper.GetStringMapStringSlice(LocalProjectsConfigKey)\n\n\tfor namespace, paths := range projects {\n\t\tfor _, path := range paths {\n\t\t\tif !fileutils.DirExists(path) {\n\t\t\t\tdelete(projects, namespace)\n\t\t\t}\n\t\t}\n\t}\n\n\tviper.Set(\"projects\", projects)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
EnsureQdisc makes sure that qdisc is attached to the given interface | func EnsureQdisc(ifaceName string) {
// FIXME Avoid flapping the tc program and qdisc
cmd := exec.Command("tc", "qdisc", "del", "dev", ifaceName, "clsact")
_ = cmd.Run()
cmd = exec.Command("tc", "qdisc", "add", "dev", ifaceName, "clsact")
_ = cmd.Run()
} | [
"func (t *tcShaper) interfaceExists() (bool, string, error) {\n\tdata, err := t.e.Command(\"tc\", \"qdisc\", \"show\", \"dev\", t.iface).CombinedOutput()\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\tvalue := strings.TrimSpace(string(data))\n\tif len(value) == 0 {\n\t\treturn false, \"\", nil\n\t}\n\t// Newer versions of tc and/or the kernel return the following instead of nothing:\n\t// qdisc noqueue 0: root refcnt 2\n\tfields := strings.Fields(value)\n\tif len(fields) > 1 && fields[1] == \"noqueue\" {\n\t\treturn false, \"\", nil\n\t}\n\treturn true, value, nil\n}",
"func validateQoS(hc *container.HostConfig) error {\n\treturn nil\n}",
"func (wgi *WireguardInterface) EnsureInterfaceIsUp() error {\n\n\t// bring up wireguard interface\n\tcmd := exec.Command(\"/sbin/ip\", \"--br\", \"link\", \"show\", \"dev\", wgi.InterfaceName, \"up\", \"type\", \"wireguard\")\n\tvar stdout, stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\toutStr, errStr := string(stdout.Bytes()), string(stderr.Bytes())\n\tif len(errStr) > 0 {\n\t\te := fmt.Sprintf(\"/sbin/ip reported: %s\", errStr)\n\t\treturn errors.New(e)\n\t}\n\tif len(outStr) > 0 {\n\t\t// output should contain interface name an \"UP\" TODO\n\t\tlog.WithField(\"o\", outStr).Trace(\"Interface is up\")\n\t\treturn nil\n\t}\n\n\t// bring up wireguard interface\n\tcmd = exec.Command(\"/sbin/ip\", \"link\", \"set\", \"up\", \"dev\", wgi.InterfaceName)\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, errStr = string(stdout.Bytes()), string(stderr.Bytes())\n\tif len(errStr) > 0 {\n\t\te := fmt.Sprintf(\"/sbin/ip reported: %s\", errStr)\n\t\treturn errors.New(e)\n\t}\n\n\treturn nil\n}",
"func validateQoS(_ *container.HostConfig) error {\n\treturn nil\n}",
"func checkEnqueue(q Queue, e interface{}, cnt *int) error {\n\tq.Enqueue(e)\n\t*cnt++\n\tif err := checkLength(q, *cnt); err != nil {\n\t\treturn fmt.Errorf(\"q.Enqueue(%v) got %v\", e, err)\n\t}\n\treturn nil\n}",
"func shouldInitErasureDisks(errs []error) bool {\n\treturn countErrs(errs, errUnformattedDisk) == len(errs)\n}",
"func (c *RabbitMQClient) Connect() error {\n\tfor _, q := range c.Cfg.Queues {\n\t\tif q.SkipDeclare {\n\t\t\tc.Log.Info(fmt.Sprintf(\"Skipping declaration of queue: %s\", q.Name))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Declare exchange and DLE if an exchange is specified and SkipDeclare is false\n\t\tif q.Exchange != nil && !q.Exchange.SkipDeclare {\n\t\t\terr := c.declareExchange(q.Exchange)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// If AutoDLQ is true, provision DLQ for this queue\n\t\tif q.AutoDLQ {\n\t\t\terr := c.declareQueueWithDLQ(q)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr := c.declareQueue(q)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *NvmeSubsystemHostInlineIoQueue) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCount(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDepth(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func AttachInterfaceToNetwork(machineId string, n int, def shared.InterfaceDef) error {\n\terr := system.EbtablesFlush(MachineNicName(machineId, n))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = system.EbtablesAllowTraffic(MachineNicName(machineId, n), def.MAC, def.IP)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = system.SetInterfaceMaster(MachineNicName(machineId, n), NetworkNicName(def.Network))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tpps, err := system.GetInterfacePPS(MachineNicName(machineId, n), \"rx\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Monitor failed for machine %s: %s\\n\", machineId, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif pps > 80000 {\n\t\t\t\tlog.Printf(\"Monitor DDOS alert for machine %s: shutting down interface\\n\", machineId)\n\n\t\t\t\terr := system.DownInterface(MachineNicName(machineId, n))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Monitor failed to shut down interface after DDOS alter. Machine\", machineId)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func (c *RabbitMQClient) declareQueue(q queue.RabbitMQQueue) error {\n\tcq, err := c.Channel.QueueDeclare(\n\t\tq.Name,\n\t\tq.Durable, // durable\n\t\tq.AutoDelete, // auto-delete\n\t\tq.Exclusive, // exclusive\n\t\tq.NoWait, // no-wait\n\t\tnil, // arguments\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Bind to exchange if such is specified. Otherwise the queue declared above will bind to AMQP Default\n\tif q.Exchange != nil {\n\t\terr = c.Channel.QueueBind(cq.Name, q.Name, q.Exchange.Name, true, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (wgi *WireguardInterface) EnsureIPAddressIsAssigned() error {\n\n\tvar err error\n\n\ti, err := net.InterfaceByName(wgi.InterfaceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.WithField(\"intfName\", i.Name).Tracef(\"found wg interface\")\n\n\t// Assign IP if not yet present\n\ta, err := i.Addrs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(a) == 0 {\n\t\tcmd := exec.Command(\"/sbin/ip\", \"address\", \"add\", \"dev\", wgi.InterfaceName, wgi.IP.String())\n\t\tvar stdout, stderr bytes.Buffer\n\t\tcmd.Stdout = &stdout\n\t\tcmd.Stderr = &stderr\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, errStr := string(stdout.Bytes()), string(stderr.Bytes())\n\t\tif len(errStr) > 0 {\n\t\t\te := fmt.Sprintf(\"/sbin/ip reported: %s\", errStr)\n\t\t\treturn errors.New(e)\n\t\t}\n\t}\n\n\ta, err = i.Addrs()\n\tif len(a) == 0 {\n\t\te := fmt.Sprintf(\"unable to add ip address %s to interface %s: %s\", wgi.IP.String(), wgi.InterfaceName, err)\n\t\treturn errors.New(e)\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"intfName\": i.Name,\n\t\t\"ip\": a[0],\n\t}).Tracef(\"added ip to interface\")\n\n\treturn nil\n}",
"func setupNetwork(conn *urpc.Client, pid int, conf *config.Config) error {\n\tlog.Infof(\"Setting up network\")\n\n\tswitch conf.Network {\n\tcase config.NetworkNone:\n\t\tlog.Infof(\"Network is disabled, create loopback interface only\")\n\t\tif err := createDefaultLoopbackInterface(conf, conn); err != nil {\n\t\t\treturn fmt.Errorf(\"creating default loopback interface: %v\", err)\n\t\t}\n\tcase config.NetworkSandbox:\n\t\t// Build the path to the net namespace of the sandbox process.\n\t\t// This is what we will copy.\n\t\tnsPath := filepath.Join(\"/proc\", strconv.Itoa(pid), \"ns/net\")\n\t\tif err := createInterfacesAndRoutesFromNS(conn, nsPath, conf); err != nil {\n\t\t\treturn fmt.Errorf(\"creating interfaces from net namespace %q: %v\", nsPath, err)\n\t\t}\n\tcase config.NetworkHost:\n\t\t// Nothing to do here.\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid network type: %v\", conf.Network)\n\t}\n\treturn nil\n}",
"func (pr *PodRequest) ConfigureInterface(namespace string, podName string, macAddress string, ipAddress string, gatewayIP string, mtu int, ingress, egress int64) ([]*current.Interface, error) {\n\tnetns, err := ns.GetNS(pr.Netns)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open netns %q: %v\", pr.Netns, err)\n\t}\n\tdefer netns.Close()\n\n\thostIface, contIface, err := setupInterface(netns, pr.SandboxID, pr.IfName, macAddress, ipAddress, gatewayIP, mtu)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tifaceID := fmt.Sprintf(\"%s_%s\", namespace, podName)\n\n\tovsArgs := []string{\n\t\t\"add-port\", \"br-int\", hostIface.Name, \"--\", \"set\",\n\t\t\"interface\", hostIface.Name,\n\t\tfmt.Sprintf(\"external_ids:attached_mac=%s\", macAddress),\n\t\tfmt.Sprintf(\"external_ids:iface-id=%s\", ifaceID),\n\t\tfmt.Sprintf(\"external_ids:ip_address=%s\", ipAddress),\n\t\tfmt.Sprintf(\"external_ids:sandbox=%s\", pr.SandboxID),\n\t}\n\tif out, err := ovsExec(ovsArgs...); err != nil {\n\t\treturn nil, fmt.Errorf(\"failure in plugging pod interface: %v\\n %q\", err, out)\n\t}\n\n\tif err := clearPodBandwidth(pr.SandboxID); err != nil {\n\t\treturn nil, err\n\t}\n\tif ingress > 0 || egress > 0 {\n\t\tl, err := netlink.LinkByName(hostIface.Name)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to find host veth interface %s: %v\", hostIface.Name, err)\n\t\t}\n\t\terr = netlink.LinkSetTxQLen(l, 1000)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to set host veth txqlen: %v\", err)\n\t\t}\n\n\t\tif err := setPodBandwidth(pr.SandboxID, hostIface.Name, ingress, egress); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn []*current.Interface{hostIface, contIface}, nil\n}",
"func MustBindQueue(ch *amqp.Channel, exchange, qName string, routerKeys []string, args amqp.Table) {\n\tq, err := ch.QueueDeclare(\n\t\tqName, // name\n\t\ttrue, // durable\n\t\tfalse, // delete when usused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\targs, // arguments\n\t)\n\tfailOnError(err, \"Failed to declare a queue\")\n\n\tfor _, s := range routerKeys {\n\t\tlog.Printf(\"Binding queue %s to exchange %s with routing key %s\",\n\t\t\tq.Name, exchange, s)\n\n\t\terr := ch.QueueBind(\n\t\t\tq.Name, // queue name\n\t\t\ts, // routing key\n\t\t\texchange, // exchange\n\t\t\tfalse,\n\t\t\tnil,\n\t\t)\n\n\t\tfailOnError(err, \"Failed to bind a queue\")\n\t}\n\n}",
"func setupQueue(options Options) (*Connector, error) {\n\n\tconn, err := amqp.Dial(\n\t\tfmt.Sprintf(\n\t\t\tUriTemplate,\n\t\t\toptions.Dial.User,\n\t\t\toptions.Dial.Password,\n\t\t\toptions.Dial.Host,\n\t\t\toptions.Dial.Port,\n\t\t))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmqChannel, err := conn.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// declare a not direct exchange if ExchangeName is provided\n\tif options.Exchange.Name != \"\" {\n\t\terr = mqChannel.ExchangeDeclare(\n\t\t\toptions.Exchange.Name,\n\t\t\toptions.Exchange.Kind,\n\t\t\toptions.Exchange.Durable,\n\t\t\toptions.Exchange.AutoDelete,\n\t\t\toptions.Exchange.Internal,\n\t\t\toptions.Exchange.NoWait,\n\t\t\toptions.Exchange.Args,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmQueue, err := mqChannel.QueueDeclare(\n\t\toptions.Queue.Name,\n\t\toptions.Queue.Durable,\n\t\toptions.Queue.AutoDelete,\n\t\toptions.Queue.Exclusive,\n\t\toptions.Queue.NoWait,\n\t\toptions.Queue.Args,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif options.Exchange.Name != \"\" {\n\t\terr = mqChannel.QueueBind(\n\t\t\tmQueue.Name,\n\t\t\toptions.Bind.RoutingKey,\n\t\t\toptions.Exchange.Name,\n\t\t\toptions.Bind.NoWait,\n\t\t\toptions.Bind.Args,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Connector{\n\t\toptions: options,\n\t\tch: mqChannel,\n\t\tcn: conn,\n\t\tterminateCh: make(chan struct{}, 1),\n\t}, nil\n}",
"func MustDeclareConsumer(ch *amqp.Channel, exchange, qName string, routerKeys []string, args amqp.Table) (*amqp.Channel, <-chan amqp.Delivery) {\n\tq, err := ch.QueueDeclare(\n\t\tqName, // name\n\t\tfalse, // durable\n\t\tfalse, // delete when usused\n\t\tfalse, // exclusive\n\t\tfalse, // no-wait\n\t\targs, // arguments\n\t)\n\tfailOnError(err, \"Failed to declare a queue\")\n\n\tfor _, s := range routerKeys {\n\t\tlog.Printf(\"Binding queue %s to exchange %s with routing key %s\",\n\t\t\tq.Name, exchange, s)\n\n\t\terr := ch.QueueBind(\n\t\t\tq.Name, // queue name\n\t\t\ts, // routing key\n\t\t\texchange, // exchange\n\t\t\tfalse,\n\t\t\tnil,\n\t\t)\n\t\tfailOnError(err, \"Failed to bind a queue\")\n\t}\n\n\terr = ch.Qos(\n\t\t5, // prefetch count\n\t\t0, // prefetch size\n\t\tfalse, // global\n\t)\n\tfailOnError(err, \"Failed to set QoS\")\n\n\tmsgs, err := ch.Consume(\n\t\tq.Name, // queue\n\t\t\"\", // consumer\n\t\tfalse, // auto ack\n\t\tfalse, // exclusive\n\t\tfalse, // no local\n\t\tfalse, // no wait\n\t\tnil, // args\n\t)\n\n\tfailOnError(err, \"Failed to register a consumer\")\n\n\treturn ch, msgs\n}",
"func isPhysicalIface(ifaceName string) (bool, error) {\n\tif ifaceName == \"lo\" {\n\t\treturn false, nil\n\t}\n\n\tethHandle, err := ethtool.NewEthtool()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer ethHandle.Close()\n\n\tbus, err := ethHandle.BusInfo(ifaceName)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\n\t// Check for a pci bus format\n\ttokens := strings.Split(bus, \":\")\n\tif len(tokens) != 3 {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}",
"func checkDisks(profile *hwcc.HardwareClassification, host *bmh.BareMetalHost) bool {\n\tdiskDetails := profile.Spec.HardwareCharacteristics.Disk\n\tif diskDetails == nil {\n\t\treturn true\n\t}\n\n\tnewDisk := host.Status.HardwareDetails.Storage\n\tif diskDetails.DiskSelector != nil {\n\n\t\tfilteredDisk, matched := checkDisk(diskDetails.DiskSelector, host.Status.HardwareDetails.Storage)\n\n\t\tif !matched {\n\t\t\tlog.Info(\"Disk Pattern\",\n\t\t\t\t\"host\", host.Name,\n\t\t\t\t\"profile\", profile.Name,\n\t\t\t\t\"namespace\", host.Namespace,\n\t\t\t\t\"ok\", false,\n\t\t\t)\n\t\t\treturn false\n\t\t} else if len(filteredDisk) > 0 {\n\t\t\tnewDisk = filteredDisk\n\t\t}\n\t}\n\n\tok := checkRangeInt(\n\t\tdiskDetails.MinimumCount,\n\t\tdiskDetails.MaximumCount,\n\t\tlen(newDisk),\n\t)\n\tlog.Info(\"Disk Pattern\",\n\t\t\"host\", host.Name,\n\t\t\"profile\", profile.Name,\n\t\t\"namespace\", host.Namespace,\n\t\t\"minCount\", diskDetails.MinimumCount,\n\t\t\"maxCount\", diskDetails.MaximumCount,\n\t\t\"actualCount\", len(newDisk),\n\t\t\"ok\", ok,\n\t)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor i, disk := range newDisk {\n\n\t\t// The disk size is reported on the host in bytes and the\n\t\t// classification rule is given in GB, so we have to convert\n\t\t// the values to the same units. Reducing bytes to GiB loses\n\t\t// detail, so we convert GB to bytes.\n\t\tminSize := bmh.Capacity(diskDetails.MinimumIndividualSizeGB) * bmh.GigaByte\n\t\tmaxSize := bmh.Capacity(diskDetails.MaximumIndividualSizeGB) * bmh.GigaByte\n\n\t\tok := checkRangeCapacity(\n\t\t\tminSize,\n\t\t\tmaxSize,\n\t\t\tdisk.SizeBytes,\n\t\t)\n\t\tlog.Info(\"DiskSize\",\n\t\t\t\"host\", host.Name,\n\t\t\t\"profile\", profile.Name,\n\t\t\t\"namespace\", host.Namespace,\n\t\t\t\"minSize\", minSize,\n\t\t\t\"maxSize\", maxSize,\n\t\t\t\"actualSize\", disk.SizeBytes,\n\t\t\t\"diskNum\", i,\n\t\t\t\"diskName\", disk.Name,\n\t\t\t\"ok\", ok,\n\t\t)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func configureInterface(netns ns.NetNS, ifName string, result *current.Result) error {\n\treturn netns.Do(func(_ ns.NetNS) error {\n\t\terr := ipam.ConfigureIface(ifName, result)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = addRouteInContainer(ifName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SQL returns the SQL filter. | func (f FUOTADeploymentFilters) SQL() string {
var filters []string
var nullDevEUI lorawan.EUI64
if f.DevEUI != nullDevEUI {
filters = append(filters, "fdd.dev_eui = :dev_eui")
}
if f.ApplicationID != 0 {
filters = append(filters, "d.application_id = :application_id")
}
if len(filters) == 0 {
return ""
}
return "where " + strings.Join(filters, " and ")
} | [
"func FilterSQL(sql string) string {\n\treturn sqlCharPattern.ReplaceAllStringFunc(sql, func(match string) string {\n\t\treturn escapeRep[match]\n\t})\n}",
"func (f *FilterQuery) getFilterConditionSQL() {\n\tswitch f.condition {\n\tcase \"like\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v like ? \", f.queryParam)\n\t\tf.ValueSQL = fmt.Sprintf(\"%v%v%v\", \"%\", f.QueryValue, \"%\")\n\t\tbreak\n\tcase \"ilike\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v ilike ? \", f.queryParam)\n\t\tf.ValueSQL = fmt.Sprintf(\"%v%v%v\", \"%\", f.QueryValue, \"%\")\n\t\tbreak\n\tcase \"in\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v in (?) \", f.queryParam)\n\t\tf.ValueSQL = strings.Split(f.QueryValue, \",\")\n\t\tbreak\n\tcase \"notin\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v not in (?) \", f.queryParam)\n\t\tf.ValueSQL = strings.Split(f.QueryValue, \",\")\n\t\tbreak\n\tcase \"start\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v >= ? \", f.queryParam)\n\t\tf.ValueSQL = fmt.Sprintf(\"%v%v\", f.QueryValue, \" 00:00:00\")\n\t\tbreak\n\tcase \"end\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v <= ? \", f.queryParam)\n\t\tf.ValueSQL = fmt.Sprintf(\"%v%v\", f.QueryValue, \" 23:59:59\")\n\t\tbreak\n\tcase \"isnull\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v is not null \", f.queryParam)\n\t\tf.ValueSQL = nil\n\t\tif f.QueryValue == \"true\" {\n\t\t\tf.ConditionSQL = fmt.Sprintf(\" %v is null \", f.queryParam)\n\t\t}\n\t\tbreak\n\tcase \"lt\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v < ? \", f.queryParam)\n\t\tf.ValueSQL = f.QueryValue\n\t\tbreak\n\tcase \"lte\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v <= ? \", f.queryParam)\n\t\tf.ValueSQL = f.QueryValue\n\t\tbreak\n\tcase \"gt\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v > ? \", f.queryParam)\n\t\tf.ValueSQL = f.QueryValue\n\t\tbreak\n\tcase \"gte\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v >= ? \", f.queryParam)\n\t\tf.ValueSQL = f.QueryValue\n\t\tbreak\n\tcase \"isempty\":\n\t\tf.ConditionSQL = fmt.Sprintf(\" (COALESCE(\\\"%v\\\"::varchar ,'') != '' ) \", f.queryParam)\n\t\tf.ValueSQL = nil\n\t\tif f.QueryValue == \"true\" {\n\t\t\tf.ConditionSQL = fmt.Sprintf(\" (COALESCE(\\\"%v\\\"::varchar ,'') = '' ) \", f.queryParam)\n\t\t\tf.ValueSQL = nil\n\t\t}\n\t\tbreak\n\tdefault:\n\t\tf.ConditionSQL = fmt.Sprintf(\" %v = ? \", f.queryParam)\n\t\tf.ValueSQL = f.QueryValue\n\n\t}\n\n\t// \tbreak\n\treturn\n}",
"func conditionSql(scope scope) (sql string, vars []interface{}) {\n\tdb, _ := gorm.Open(\"postgres\", &gosql.DB{})\n\tds := scope.scope(&db).NewScope(nil)\n\tsql = strings.TrimSpace(ds.CombinedConditionSql())\n\tvars = ds.SqlVars\n\treturn\n}",
"func (sqlBuilder *SQLBuilder) whereToSQL() string {\n\tif sqlBuilder.Type == \"sqlite\" {\n\t\treturn sqlBuilder.whereToSQLSqlite()\n\t}\n\treturn \"1=1\"\n}",
"func (db *DB) SQL(filename string) string {\n\treturn sql(filename)\n}",
"func (s rawSQL) RawSQL() string {\n\treturn string(s)\n}",
"func (q *orderbookQueryBuilder) Filter(col string, v interface{}) string {\n\tstr, ok := v.(string)\n\n\tif ok && str == \"\" {\n\t\treturn fmt.Sprintf(\"%s IS NULL\", col)\n\t}\n\n\tn := q.pushArg(v)\n\treturn fmt.Sprintf(\"%s = $%d\", col, n)\n}",
"func (o Op) SQL() string {\n\treturn opFormat[o]\n}",
"func (d DotSQLItem) RawSQL() string {\n\tdelimiter := d.Attrs[\"delimiter\"]\n\tif delimiter == \"\" {\n\t\tdelimiter = \";\"\n\t}\n\n\treturn TrimSQL(strings.Join(d.Content, \"\\n\"), delimiter)\n}",
"func (q SelectQuery) AppendSQL(buf *strings.Builder, args *[]interface{}, params map[string]int) {\n\t// WITH\n\tif !q.nested {\n\t\tappendCTEs(buf, args, q.CTEs, q.FromTable, q.JoinTables)\n\t}\n\t// SELECT\n\tif q.SelectType == \"\" {\n\t\tq.SelectType = SelectTypeDefault\n\t}\n\tbuf.WriteString(string(q.SelectType))\n\tif q.SelectType == SelectTypeDistinctOn {\n\t\tbuf.WriteString(\" (\")\n\t\tq.DistinctOn.AppendSQLExclude(buf, args, nil, nil)\n\t\tbuf.WriteString(\")\")\n\t}\n\tif len(q.SelectFields) > 0 {\n\t\tbuf.WriteString(\" \")\n\t\tq.SelectFields.AppendSQLExcludeWithAlias(buf, args, nil, nil)\n\t}\n\t// FROM\n\tif q.FromTable != nil {\n\t\tbuf.WriteString(\" FROM \")\n\t\tswitch v := q.FromTable.(type) {\n\t\tcase Query:\n\t\t\tbuf.WriteString(\"(\")\n\t\t\tv.NestThis().AppendSQL(buf, args, nil)\n\t\t\tbuf.WriteString(\")\")\n\t\tdefault:\n\t\t\tq.FromTable.AppendSQL(buf, args, nil)\n\t\t}\n\t\talias := q.FromTable.GetAlias()\n\t\tif alias != \"\" {\n\t\t\tbuf.WriteString(\" AS \")\n\t\t\tbuf.WriteString(alias)\n\t\t}\n\t}\n\t// JOIN\n\tif len(q.JoinTables) > 0 {\n\t\tbuf.WriteString(\" \")\n\t\tq.JoinTables.AppendSQL(buf, args, nil)\n\t}\n\t// WHERE\n\tif len(q.WherePredicate.Predicates) > 0 {\n\t\tbuf.WriteString(\" WHERE \")\n\t\tq.WherePredicate.toplevel = true\n\t\tq.WherePredicate.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// GROUP BY\n\tif len(q.GroupByFields) > 0 {\n\t\tbuf.WriteString(\" GROUP BY \")\n\t\tq.GroupByFields.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// HAVING\n\tif len(q.HavingPredicate.Predicates) > 0 {\n\t\tbuf.WriteString(\" HAVING \")\n\t\tq.HavingPredicate.toplevel = true\n\t\tq.HavingPredicate.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// WINDOW\n\tif len(q.Windows) > 0 {\n\t\tbuf.WriteString(\" WINDOW \")\n\t\tq.Windows.AppendSQL(buf, args, nil)\n\t}\n\t// ORDER BY\n\tif len(q.OrderByFields) > 0 {\n\t\tbuf.WriteString(\" ORDER BY \")\n\t\tq.OrderByFields.AppendSQLExclude(buf, args, nil, nil)\n\t}\n\t// LIMIT\n\tif q.LimitValue != nil {\n\t\tbuf.WriteString(\" LIMIT ?\")\n\t\tif *q.LimitValue < 0 {\n\t\t\t*q.LimitValue = -*q.LimitValue\n\t\t}\n\t\t*args = append(*args, *q.LimitValue)\n\t}\n\t// OFFSET\n\tif q.OffsetValue != nil {\n\t\tbuf.WriteString(\" OFFSET ?\")\n\t\tif *q.OffsetValue < 0 {\n\t\t\t*q.OffsetValue = -*q.OffsetValue\n\t\t}\n\t\t*args = append(*args, *q.OffsetValue)\n\t}\n\tif !q.nested {\n\t\tquery := buf.String()\n\t\tbuf.Reset()\n\t\tquestionToDollarPlaceholders(buf, query)\n\t\tif q.Log != nil {\n\t\t\tvar logOutput string\n\t\t\tswitch {\n\t\t\tcase Lstats&q.LogFlag != 0:\n\t\t\t\tlogOutput = \"\\n----[ Executing query ]----\\n\" + buf.String() + \" \" + fmt.Sprint(*args) +\n\t\t\t\t\t\"\\n----[ with bind values ]----\\n\" + questionInterpolate(query, *args...)\n\t\t\tcase Linterpolate&q.LogFlag != 0:\n\t\t\t\tlogOutput = questionInterpolate(query, *args...)\n\t\t\tdefault:\n\t\t\t\tlogOutput = buf.String() + \" \" + fmt.Sprint(*args)\n\t\t\t}\n\t\t\tswitch q.Log.(type) {\n\t\t\tcase *log.Logger:\n\t\t\t\t_ = q.Log.Output(q.logSkip+2, logOutput)\n\t\t\tdefault:\n\t\t\t\t_ = q.Log.Output(q.logSkip+1, logOutput)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (col *StringColumn) SQL() string {\n\tmodifiers := strings.Join(col.modifiers, \" \")\n\treturn fmt.Sprintf(\"`%s` VARCHAR(%d) %s\", col.name, col.length, modifiers)\n}",
"func SQLParameterList(fields []Field) string {\n\tvar params []string\n\tfor _, f := range fields {\n\t\tparams = append(params, fmt.Sprintf(\"%s=?\", f.SQLName))\n\t}\n\treturn strings.Join(params, \"AND \")\n}",
"func SendSQL(cod string) string {\n\treturn Queries[cod]\n}",
"func (q *OrderBookSummaryQuery) Filter(col string, v interface{}) string {\n\tstr, ok := v.(string)\n\n\tif ok && str == \"\" {\n\t\treturn fmt.Sprintf(\"%s IS NULL\", col)\n\t}\n\n\tn := q.pushArg(v)\n\treturn fmt.Sprintf(\"%s = $%d\", col, n)\n}",
"func PrepareFuncSQL(cfg *AplFlags, args []string) (string, []interface{}) {\n\tmtd := args[0]\n\targVals := args[1:]\n\n\targValPrep := make([]interface{}, len(argVals))\n\targIDs := make([]string, len(argVals))\n\n\tfor i, v := range argVals {\n\t\targIDs[i] = fmt.Sprintf(\"$%d\", i+1)\n\t\targValPrep[i] = v\n\t}\n\n\targIDStr := strings.Join(argIDs, \",\")\n\n\tq := fmt.Sprintf(\"select * from %s.%s(%s)\", cfg.Schema, mtd, argIDStr)\n\n\treturn q, argValPrep\n}",
"func (b *Bounds) SQL() string {\n\treturn fmt.Sprintf(\"ST_MakeEnvelope(%g, %g, %g, %g, %d)\",\n\t\tb.Xmin, b.Ymin,\n\t\tb.Xmax, b.Ymax, b.SRID)\n}",
"func sqlClause() string {\n\t// MySQL timestamp\n\tt := time.Now().AddDate(0, 0, -backdays).Format(\"2006-01-02 15:04:05\")\n\n\treturn fmt.Sprintf(\"WHERE updated_at >= '%s'\", t)\n}",
"func (sc *SearchCriteria) BuildQuery(tableName string) string {\n\twhere := \"\"\n\tif len(sc.conditions) > 0 {\n\t\twhere = \" WHERE \" + strings.Join(sc.conditions, \" AND \")\n\t}\n\n\treturn \"SELECT id, source, category, level, message, trace, payload, created_at FROM \" + tableName + where + \" ORDER BY created_at DESC LIMIT ?\"\n}",
"func (qb *QueryBuilder) GenerateWhere(filter models.Filter) (where dbx.Expression, err error) {\n\tvar expressions []dbx.Expression\n\terr = filter.ForEach(func(field string, operator string, value interface{}) error {\n\t\tswitch operator {\n\t\tcase \"=\":\n\t\t\texp := dbx.HashExp{field: value}\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \">\":\n\t\t\te := fmt.Sprintf(\"%s>{:%s}\", field, field)\n\t\t\texp := dbx.NewExp(e, dbx.Params{field: value})\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"<\":\n\t\t\te := fmt.Sprintf(\"%s<{:%s}\", field, field)\n\t\t\texp := dbx.NewExp(e, dbx.Params{field: value})\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"<=\":\n\t\t\te := fmt.Sprintf(\"%s<={:%s}\", field, field)\n\t\t\texp := dbx.NewExp(e, dbx.Params{field: value})\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \">=\":\n\t\t\te := fmt.Sprintf(\"%s>={:%s}\", field, field)\n\t\t\texp := dbx.NewExp(e, dbx.Params{field: value})\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"!=\":\n\t\t\te := fmt.Sprintf(\"%s!={:%s}\", field, field)\n\t\t\texp := dbx.NewExp(e, dbx.Params{field: value})\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"<>\":\n\t\t\te := fmt.Sprintf(\"%s<>{:%s}\", field, field)\n\t\t\texp := dbx.NewExp(e, dbx.Params{field: value})\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"in\":\n\t\t\texp := dbx.In(field, value)\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"not in\":\n\t\t\texp := dbx.NotIn(field, value)\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"like\":\n\t\t\texp := dbx.Like(field, cast.ToString(value))\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"not like\":\n\t\t\texp := dbx.Like(field, cast.ToString(value))\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"between\":\n\t\t\tslice, err := cast.ToSliceE(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(slice) < 2 {\n\t\t\t\treturn errors.Errorf(\"%s field value must be a slice at least two\", field)\n\t\t\t}\n\t\t\texp := dbx.Between(field, slice[0], slice[1])\n\t\t\texpressions = append(expressions, exp)\n\t\tcase \"not between\":\n\t\t\tslice, err := cast.ToSliceE(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(slice) < 2 {\n\t\t\t\treturn errors.Errorf(\"%s field value must be a slice at least two\", field)\n\t\t\t}\n\t\t\texp := dbx.NotBetween(field, slice[0], slice[1])\n\t\t\texpressions = append(expressions, exp)\n\t\tdefault:\n\t\t\treturn errors.Errorf(\"operator %s not supported\", operator)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn dbx.And(expressions...), nil\n}",
"func (this *query) String() (string) {\n\n\t// Return stored query string if it has already been generated.\n\n\tif this.queryString != `` {\n\t\treturn this.queryString\n\t}\n\n\t// Verify we have at least the minimum query elements for a valid query.\n\n\tif this.Table == `` || this.Command == `` {\n\t\treturn ``\n\t}\n\n\t// Convenience method for generating named parameters in query strings.\n\n\ttoFormat := func(format string, cols []string) (list []string) {\n\t\tfor _, col := range cols {\n\t\t\tif col == `*` { continue }\n\t\t\tcol = strings.ToLower(col)\n\t\t\tlist = append(list, fmt.Sprintf(format, col))\n\t\t}\n\t\treturn list\n\t}\n\n\t// Convert the table name to lowercase.\n\n\ttable := strings.ToLower(this.Table)\n\n\t// Convert the SQL command to uppercase.\n\n\tcommand := strings.ToUpper(this.Command)\n\n\t// Generate comma-separated list of lowercase column names.\n\n\tcolumns := strings.ToLower(strings.Join(this.Columns, `, `))\n\n\t// Generate comma-separated list of named parameters.\n\n\tparams := strings.Join(toFormat(namedParamFmt, this.Columns), `, `)\n\n\t// Generate comma-separated list of column assignments.\n\n\tsetters := strings.Join(toFormat(namedEqualFmt, this.Columns), `, `)\n\n\t// Generate AND-separated list of condition filters.\n\n\tfilters := strings.Join(toFormat(namedEqualFmt, this.Filters), ` AND `)\n\n\tswitch command {\n\n\tcase `INSERT`, `REPLACE`:\n\t\tthis.queryString = fmt.Sprintf(`%s INTO %s (%s) VALUES (%s)`,\n\t\t\tcommand, table, columns, params,\n\t\t)\n\n\tcase `SELECT`:\n\t\tthis.queryString = fmt.Sprintf(`%s %s FROM %s`,\n\t\t\tcommand, columns, table,\n\t\t)\n\n\tcase `UPDATE`:\n\t\tthis.queryString = fmt.Sprintf(`%s %s SET %s`,\n\t\t\tcommand, table, setters,\n\t\t)\n\n\tcase `DELETE`:\n\t\tthis.queryString = fmt.Sprintf(`DELETE FROM %s`,\n\t\t\ttable,\n\t\t)\n\n\tdefault:\n\t\treturn ``\n\t}\n\n\tif filters != `` {\n\t\tthis.queryString = fmt.Sprintf(`%s WHERE %s`,\n\t\t\tthis.queryString, filters,\n\t\t)\n\t}\n\n\treturn this.queryString\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate validates the FUOTADeployment data. | func (fd FUOTADeployment) Validate() error {
if strings.TrimSpace(fd.Name) == "" {
return errHandler.ErrFUOTADeploymentInvalidName
}
if len(fd.Payload) <= 0 || fd.Payload == nil {
return errHandler.ErrFUOTADeploymentNullPayload
}
return nil
} | [
"func (m *Deployment) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetId()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn DeploymentValidationError{\n\t\t\t\tfield: \"Id\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for Name\n\n\t// no validation rules for Description\n\n\tif v, ok := interface{}(m.GetArtifact()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn DeploymentValidationError{\n\t\t\t\tfield: \"Artifact\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetArtifactId()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn DeploymentValidationError{\n\t\t\t\tfield: \"ArtifactId\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetKubeCluster()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn DeploymentValidationError{\n\t\t\t\tfield: \"KubeCluster\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetKubeClusterId()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn DeploymentValidationError{\n\t\t\t\tfield: \"KubeClusterId\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetApplicationInstanceId()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn DeploymentValidationError{\n\t\t\t\tfield: \"ApplicationInstanceId\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (d DeploymentStep) Validate() error {\n\treturn validator.New().Struct(d)\n}",
"func (m *OnboardingDeploymentResponse) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func validate_Deployments_Update_1(ctx context.Context, r json.RawMessage) (err error) {\n\treturn validate_Object_Deployment(ctx, r, \"\")\n}",
"func (m *ForecastMeta) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateUnits(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUpdatedAt(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *ReleaseDefinitionEnvironment) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConditions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCurrentRelease(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDemands(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDeployPhases(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDeployStep(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEnvironmentOptions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateEnvironmentTriggers(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateExecutionPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOwner(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePostDeployApprovals(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePostDeploymentGates(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePreDeployApprovals(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePreDeploymentGates(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProcessParameters(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProperties(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRetentionPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSchedules(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVariables(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *Forecast) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMeta(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTimeseries(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *UpdateDeploymentResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateDeploymentResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o *PostUpdateSoftwareComponentsOKBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DeploymentTemplateStep) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateExcludedMachines(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMachines(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUnavailableMachines(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostDatasetStagingDownloadBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateSourcePath(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateSourceSystem(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validate_Object_Deployment(ctx context.Context, r json.RawMessage, path string) (err error) {\n\tif hook, ok := interface{}(&Deployment{}).(interface {\n\t\tAtlasJSONValidate(context.Context, json.RawMessage, string) (json.RawMessage, error)\n\t}); ok {\n\t\tif r, err = hook.AtlasJSONValidate(ctx, r, path); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar v map[string]json.RawMessage\n\tif err = json.Unmarshal(r, &v); err != nil {\n\t\treturn fmt.Errorf(\"invalid value for %q: expected object.\", path)\n\t}\n\n\tif err = validate_required_Object_Deployment(ctx, v, path); err != nil {\n\t\treturn err\n\t}\n\n\tallowUnknown := runtime1.AllowUnknownFromContext(ctx)\n\n\tfor k, _ := range v {\n\t\tswitch k {\n\t\tcase \"id\":\n\t\t\tif v[k] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvv := v[k]\n\t\t\tvvPath := runtime1.JoinPath(path, k)\n\t\t\tvalidator, ok := interface{}(&resource.Identifier{}).(interface {\n\t\t\t\tAtlasValidateJSON(context.Context, json.RawMessage, string) error\n\t\t\t})\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err = validator.AtlasValidateJSON(ctx, vv, vvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"name\":\n\t\tcase \"description\":\n\t\tcase \"artifact\":\n\t\t\tif v[k] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvv := v[k]\n\t\t\tvvPath := runtime1.JoinPath(path, k)\n\t\t\tif err = validate_Object_Artifact(ctx, vv, vvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"artifact_id\":\n\t\t\tif v[k] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvv := v[k]\n\t\t\tvvPath := runtime1.JoinPath(path, k)\n\t\t\tvalidator, ok := interface{}(&resource.Identifier{}).(interface {\n\t\t\t\tAtlasValidateJSON(context.Context, json.RawMessage, string) error\n\t\t\t})\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err = validator.AtlasValidateJSON(ctx, vv, vvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"kube_cluster\":\n\t\t\tif v[k] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvv := v[k]\n\t\t\tvvPath := runtime1.JoinPath(path, k)\n\t\t\tif err = validate_Object_KubeCluster(ctx, vv, vvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"kube_cluster_id\":\n\t\t\tif v[k] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvv := v[k]\n\t\t\tvvPath := runtime1.JoinPath(path, k)\n\t\t\tvalidator, ok := interface{}(&resource.Identifier{}).(interface {\n\t\t\t\tAtlasValidateJSON(context.Context, json.RawMessage, string) error\n\t\t\t})\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err = validator.AtlasValidateJSON(ctx, vv, vvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase \"application_instance_id\":\n\t\t\tif v[k] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvv := v[k]\n\t\t\tvvPath := runtime1.JoinPath(path, k)\n\t\t\tvalidator, ok := interface{}(&resource.Identifier{}).(interface {\n\t\t\t\tAtlasValidateJSON(context.Context, json.RawMessage, string) error\n\t\t\t})\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err = validator.AtlasValidateJSON(ctx, vv, vvPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif !allowUnknown {\n\t\t\t\treturn fmt.Errorf(\"unknown field %q.\", runtime1.JoinPath(path, k))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *DeleteAppVersionResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\treturn nil\n}",
"func (m *CreateDeploymentResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn CreateDeploymentResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *ReleaseDefinitionEnvironmentSummary) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLastReleases(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *TemplateDeploymentInfo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateTargetInfo(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func runValidation(appSpec []byte, computePlatform string) error {\n\tvar err error\n\n\t// Validate version before converting AppSpec to objects\n\terr = validateVersionString(string(appSpec))\n\n\tif err != nil {\n\t\tnumOfErrors++\n\t\treturn err\n\t}\n\n\tif computePlatform == \"ecs\" {\n\t\tecsAppSpecModel, modelErr := getEcsAppSpecObjFromString(appSpec)\n\t\tif modelErr != nil {\n\t\t\treturn modelErr\n\t\t}\n\t\terr = validateEcsAppSpec(ecsAppSpecModel)\n\t} else if computePlatform == \"lambda\" {\n\t\tlambdaAppSpecModel, modelErr := getLambdaAppSpecObjFromString(appSpec)\n\t\tif modelErr != nil {\n\t\t\treturn modelErr\n\t\t}\n\t\terr = validateLambdaAppSpec(lambdaAppSpecModel)\n\t} else {\n\t\tserverAppSpecModel, modelErr := getServerAppSpecObjFromString(appSpec)\n\t\tif modelErr != nil {\n\t\t\treturn modelErr\n\t\t}\n\t\terr = validateServerAppSpec(serverAppSpecModel)\n\t}\n\n\treturn err\n}",
"func (m *CreateDeploymentRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPayload()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn CreateDeploymentRequestValidationError{\n\t\t\t\tfield: \"Payload\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *FirmwareUpdateProgress) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateFirmwareUpdateProgressInlineUpdateState(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateJob(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateUpdateType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gomake creates a cli app for the given Gomakefile. | func Gomake(gomakefile *Gomakefile) *cli.App {
app := &cli.App{
Name: "gomake - Makefile for gophers",
Version: Version,
Action: func(ctx *cli.Context) error {
_, ok := gomakefile.Targets[""]
if !ok {
return nil
}
results := gomakefile.Make("")
return HandleResults(results)
},
}
for gomakeTarget, rule := range gomakefile.Targets {
// Skip default
if gomakeTarget == "" {
continue
}
// Create closure around target for command
target := gomakeTarget
command := &cli.Command{
Name: target,
Description: rule.Description,
Action: func(ctx *cli.Context) error {
results := gomakefile.Make(target)
return HandleResults(results)
},
}
app.Commands = append(app.Commands, command)
}
sort.Sort(app.Commands)
return app
} | [
"func Generate() *cli.App {\n\tapp := cli.NewApp()\n\n\tapp.Name = \"Command line application\"\n\tapp.Usage = \"Find IPs and server names over the internet\"\n\n\tflags := []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"host\",\n\t\t\tValue: \"devbook.com.br\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"ip\",\n\t\t\tUsage: \"Search for IPs on internet\",\n\t\t\tFlags: flags,\n\t\t\tAction: findIps,\n\t\t},\n\t\t{\n\t\t\tName: \"server\",\n\t\t\tUsage: \"Search for server name on internet\",\n\t\t\tFlags: flags,\n\t\t\tAction: findServerName,\n\t\t},\n\t}\n\n\treturn app\n}",
"func NewApp() *cobra.Command {\n\tc := &cobra.Command{\n\t\tUse: \"app [github.com/org/repo]\",\n\t\tShort: \"Generates an empty application\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: appHandler,\n\t}\n\tc.Flags().String(\"address-prefix\", \"cosmos\", \"Address prefix\")\n\treturn c\n}",
"func setupCliApp() *cli.App {\n\tapp := cli.NewApp()\n\tapp.Name = \"goforecast\"\n\tapp.Usage = `goforecast looks up three days of weather based upon a partial\n\t address; e.g., a zip code.`\n\tapp.Author = \"Alex Toombs\"\n\n\tpopulateCommands(app)\n\treturn app\n}",
"func CliApp() {\n\tvar repeatCount int\n\tvar err error\n\tvar gopherName string\n\n\tflag.StringVar(&gopherName, \"gophername\", \"Gopher\", \"name of the gopher\")\n\tflag.Parse()\n\tfmt.Println(\"Hello \" + gopherName + \" !\")\n\tif len(os.Args) >= 2 {\n\t\trepeatCount, err = strconv.Atoi(os.Args[1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfor i := 0; i < repeatCount; i++ {\n\t\t\tfmt.Println(os.Args[0])\n\t\t\tfmt.Println(\"Hello CLI\")\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Nothing Bro\")\n\t}\n}",
"func CreateCommandLine() *cli.App {\n\tapp := cli.NewApp()\n\tapp.Version = \"0.0.14\"\n\tapp.Author = \"Pouyan Azari\"\n\tapp.EnableBashCompletion = true\n\tapp.Name = \"A2A\"\n\tapp.Usage = \"Almanac2Ansible helps you to use your Almanac inventory as Ansible dynamic inventory\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"vagrant, a\",\n\t\t\tUsage: \"Vagrant mode which needs the name of the host to be added to the given service\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"list, l\",\n\t\t\tUsage: \"Lists the Services and Hosts in a way readable by Ansible.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"host, s\",\n\t\t\tUsage: \"List the properties for the given host\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"alertmanager, m\",\n\t\t\tUsage: \"Returns the alert manager settings, \" +\n\t\t\t\t\"it reads the existing file and adds the needed data to the alerts\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"blackbox, b\",\n\t\t\tUsage: \"Returns the list of services that support the blackbox\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"prometheus, p\",\n\t\t\tUsage: \"Returns the list of services supported by Prometheus for the given host\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"ignore, i\",\n\t\t\tUsage: \"Make the Prometheus or blackbox exporter, ignore the given group\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-cache, n\",\n\t\t\tUsage: \"Run the application in no cache mode\",\n\t\t},\n\t}\n\n\treturn app\n}",
"func newCliApp() *cli.App {\n\t// Log to standard error instead of files.\n\tflag.Set(\"logtostderr\", \"true\")\n\n\t// Flushes all pending log I/O.\n\tdefer glog.Flush()\n\n\tapp := cli.NewApp()\n\n\tapp.Name = \"cyclone-worker\"\n\n\topts := options.NewWorkerOptions()\n\topts.AddFlags(app)\n\n\tapp.Action = func(c *cli.Context) error {\n\t\tglog.Info(\"worker options: %v\", opts)\n\n\t\tworker := worker.NewWorker(opts)\n\t\treturn worker.Run()\n\t}\n\n\t// sort flags by name\n\tsort.Sort(cli.FlagsByName(app.Flags))\n\n\treturn app\n}",
"func App(name, desc string) *Cli {\n\tc := &Cli{\n\t\tcli: cli.App(name, desc),\n\t\tcmd: &rootCmd{\n\t\t\tname: name,\n\t\t},\n\t}\n\t// // c.cli.Version(\"v\", appVersion)\n\t// c.cli.Command(\"version\", \"print delailed version info\", func(cmd *cli.Cmd) {\n\t// \tcmd.Action = func() {\n\t// \t\tfmt.Print(BuildDetails())\n\t// \t}\n\t// })\n\tc.name = name\n\treturn c\n}",
"func NewApp() *cli.App {\n\tcli.AppHelpTemplate = `NAME:\n {{.Name}}{{if .Usage}} - {{.Usage}}{{end}}\nUSAGE:\n {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}\nVERSION:\n {{.Version}}{{end}}{{end}}{{if .Description}}\nDESCRIPTION:\n {{.Description}}{{end}}{{if len .Authors}}\nAUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:\n {{range $index, $author := .Authors}}{{if $index}}\n {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}\nOPTIONS:\n {{range $index, $option := .VisibleFlags}}{{if $index}}\n {{end}}{{$option}}{{end}}{{end}}\n`\n\tapp := cli.NewApp()\n\tapp.Name = \"dockle\"\n\tapp.Version = version\n\tapp.ArgsUsage = \"image_name\"\n\n\tapp.Usage = \"Container Image Linter for Security, Helping build the Best-Practice Docker Image, Easy to start\"\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"input\",\n\t\t\tUsage: \"input file path instead of image name\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"ignore, i\",\n\t\t\tEnvVar: \"DOCKLE_IGNORES\",\n\t\t\tUsage: \"checkpoints to ignore. You can use .dockleignore too.\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"accept-key, ak\",\n\t\t\tEnvVar: \"DOCKLE_ACCEPT_KEYS\",\n\t\t\tUsage: \"For CIS-DI-0010. You can add acceptable keywords. e.g) -ak GPG_KEY -ak KEYCLOAK\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"accept-file, af\",\n\t\t\tEnvVar: \"DOCKLE_ACCEPT_FILES\",\n\t\t\tUsage: \"For CIS-DI-0010. You can add acceptable file names. e.g) -af id_rsa -af config.json\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"accept-file-extension, ae\",\n\t\t\tEnvVar: \"DOCKLE_ACCEPT_FILE_EXTENSIONS\",\n\t\t\tUsage: \"For CIS-DI-0010. You can add acceptable file extensions. e.g) -ae pem -ae log\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format, f\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"format (json)\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"output, o\",\n\t\t\tUsage: \"output file name\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"exit-code, c\",\n\t\t\tUsage: \"exit code when alert were found\",\n\t\t\tValue: 0,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"exit-level, l\",\n\t\t\tUsage: \"change ABEND level when use exit-code=1\",\n\t\t\tValue: \"WARN\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug, d\",\n\t\t\tUsage: \"debug mode\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quiet, q\",\n\t\t\tUsage: \"suppress log output\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"no-color\",\n\t\t\tEnvVar: \"NO_COLOR\",\n\t\t\tUsage: \"suppress log output\",\n\t\t},\n\n\t\t// Registry flag\n\t\tcli.DurationFlag{\n\t\t\tName: \"timeout, t\",\n\t\t\tValue: time.Second * 90,\n\t\t\tEnvVar: \"DOCKLE_TIMEOUT\",\n\t\t\tUsage: \"docker timeout. e.g) 5s, 5m...\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"authurl\",\n\t\t\tEnvVar: \"DOCKLE_AUTH_URL\",\n\t\t\tUsage: \"registry authenticate url\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"username\",\n\t\t\tEnvVar: \"DOCKLE_USERNAME\",\n\t\t\tUsage: \"registry login username\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password\",\n\t\t\tEnvVar: \"DOCKLE_PASSWORD\",\n\t\t\tUsage: \"registry login password. 
Using --password via CLI is insecure.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"insecure\",\n\t\t\tEnvVar: \"DOCKLE_INSECURE\",\n\t\t\tUsage: \"registry connect insecure\",\n\t\t},\n\t\tcli.BoolTFlag{\n\t\t\tName: \"nonssl\",\n\t\t\tEnvVar: \"DOCKLE_NON_SSL\",\n\t\t\tUsage: \"registry connect without ssl\",\n\t\t},\n\n\t\tcli.StringFlag{\n\t\t\tName: \"cache-dir\",\n\t\t\tUsage: \"cache directory\",\n\t\t},\n\t}\n\n\tapp.Action = pkg.Run\n\treturn app\n}",
"func CreateCLI() *CLI {\n\tc := &CLI{\n\t\tapp: cli.NewApp(),\n\t}\n\tc.app.Name = version.Description\n\tc.app.Usage = version.Usage\n\tc.app.Version = version.Version()\n\tc.app.Flags = []cli.Flag{\n\t\t&cli.StringFlag{\n\t\t\tName: \"gitlab-url\",\n\t\t\tAliases: []string{\"u\"},\n\t\t\tValue: gitLabURLDefault,\n\t\t\tEnvVars: []string{envPrefix + \"GITLAB_URL\"},\n\t\t\tUsage: \"Your GitLab server URL\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"gitlab-token\",\n\t\t\tAliases: []string{\"t\"},\n\t\t\tValue: gitLabTokenDefault,\n\t\t\tEnvVars: []string{envPrefix + \"GITLAB_TOKEN\"},\n\t\t\tUsage: \"Your GitLab access token\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"gitlab-group\",\n\t\t\tAliases: []string{\"g\"},\n\t\t\tValue: gitLabGroupDefault,\n\t\t\tEnvVars: []string{envPrefix + \"GITLAB_GROUP\"},\n\t\t\tUsage: \"GitLab project group\",\n\t\t},\n\t\t&cli.StringSliceFlag{\n\t\t\tName: \"exclude-projects\",\n\t\t\tAliases: []string{\"e\"},\n\t\t\tValue: cli.NewStringSlice(excludeProjectsDefault...),\n\t\t\tEnvVars: []string{envPrefix + \"EXCLUDE_PROJECTS\"},\n\t\t\tUsage: \"GitLab projects to exclude\",\n\t\t},\n\t\t&cli.StringFlag{\n\t\t\tName: \"config-file\",\n\t\t\tAliases: []string{\"c\"},\n\t\t\tValue: \".gitlab-tool.yml\",\n\t\t\tUsage: \"Application config file\",\n\t\t},\n\t}\n\tc.app.Commands = cli.Commands{\n\t\t{\n\t\t\tName: \"get\",\n\t\t\tUsage: \"get objects from gitlab server\",\n\t\t\t// Action: c.get,\n\t\t\tSubcommands: cli.Commands{\n\t\t\t\t{\n\t\t\t\t\tName: \"projects\",\n\t\t\t\t\tUsage: \"get projects from gitlab group\",\n\t\t\t\t\tAction: c.getProjects,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"tags\",\n\t\t\t\t\tUsage: \"get projects latest tags\",\n\t\t\t\t\tAction: c.getTags,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"merge-requests\",\n\t\t\t\t\tAliases: []string{\"mrs\"},\n\t\t\t\t\tUsage: \"get projects latest tags\",\n\t\t\t\t\tAction: c.getMRs,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"clone\",\n\t\t\tUsage: \"clone project or group of projects\",\n\t\t\tAction: c.clone,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\t&cli.StringFlag{\n\t\t\t\t\tName: \"exclude\",\n\t\t\t\t\tAliases: []string{\"e\"},\n\t\t\t\t\tUsage: \"projects to exclude\",\n\t\t\t\t},\n\t\t\t\t// &cli.BoolFlag{\n\t\t\t\t// \tName: \"non-empty\",\n\t\t\t\t// \tAliases: []string{\"n\"},\n\t\t\t\t// \tValue: cloneNonEmptyOnlyDefault,\n\t\t\t\t// \tEnvVars: []string{envPrefix + \"CLONE_NON_EMPTY\"},\n\t\t\t\t// \tUsage: \"Clone only non-empty projects\",\n\t\t\t\t// },\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"changelog\",\n\t\t\tAliases: []string{\"chl\"},\n\t\t\tUsage: \"changelog operations\",\n\t\t\tSubcommands: cli.Commands{\n\t\t\t\t{\n\t\t\t\t\tName: \"add\",\n\t\t\t\t\tUsage: \"add changelog section\",\n\t\t\t\t\tAction: c.addChangelog,\n\t\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"package\",\n\t\t\t\t\t\t\tAliases: []string{\"p\"},\n\t\t\t\t\t\t\tUsage: \"package name\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"version\",\n\t\t\t\t\t\t\tAliases: []string{\"v\"},\n\t\t\t\t\t\t\tUsage: \"version number\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"release\",\n\t\t\t\t\t\t\tAliases: []string{\"r\"},\n\t\t\t\t\t\t\tUsage: \"release string\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"urgency\",\n\t\t\t\t\t\t\tAliases: []string{\"u\"},\n\t\t\t\t\t\t\tUsage: \"urgency string\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: 
\"changes\",\n\t\t\t\t\t\t\tAliases: []string{\"c\"},\n\t\t\t\t\t\t\tUsage: \"changes multi-string\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"maintainer\",\n\t\t\t\t\t\t\tAliases: []string{\"m\"},\n\t\t\t\t\t\t\tUsage: \"maintainer name and email\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\t\tName: \"date\",\n\t\t\t\t\t\t\tAliases: []string{\"d\"},\n\t\t\t\t\t\t\tUsage: \"update date\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tc.app.Before = altsrc.InitInputSourceWithContext(c.app.Flags, altsrc.NewYamlSourceFromFlagFunc(\"config-file\"))\n\tc.app.Action = c.main\n\treturn c\n}",
"func MakeCli() {\n\tvar languages string\n\tlangList := langs.GetLangs()\n\tlangMap := URLMap(langList)\n\n\t// Make the CLI with Go's cli library\n\tapp := &cli.App{\n\t\tName: \"getignore\",\n\t\tUsage: \"A Pointless CLI to Download Gitignore Files 📥\",\n\t\tFlags: []cli.Flag{\n\n\t\t\t// Create flags that take arguments\n\t\t\t&cli.StringFlag{\n\t\t\t\tName: \"languages\",\n\t\t\t\tAliases: []string{\"lg\"},\n\t\t\t\tUsage: \"Provide the desired languages 🔥\",\n\t\t\t\tDestination: &languages,\n\t\t\t},\n\n\t\t\t// Create flags that don't take any argument\n\t\t\t&cli.BoolFlag{\n\t\t\t\tName: \"list\",\n\t\t\t\tAliases: []string{\"ls\"},\n\t\t\t\tUsage: \"Show a list of available languages 📝\"},\n\t\t},\n\n\t\tAction: func(c *cli.Context) error {\n\t\t\tif len(os.Args) == 1 {\n\t\t\t\tfmt.Println(\"Type 'getignore -h' to see the options💡\")\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\n\t\t\tif os.Args[1] == \"--lg\" || os.Args[1] == \"--languages\" {\n\t\t\t\tfor _, lang := range os.Args[2:] {\n\t\t\t\t\tlangURL := SelectLang(langMap, lang)\n\t\t\t\t\tif langURL != \"\" {\n\t\t\t\t\t\tutils.DownloadFile(langURL, \"./.gitignore\")\n\t\t\t\t\t\tfmt.Printf(\"Downloading %s gitignore 🌧️\\n\", strings.Title(lang))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfmt.Println(\"Download complete 🍰\")\n\t\t\t\tcli.Exit(\"\", 0)\n\n\t\t\t}\n\t\t\tif os.Args[1] == \"--ls\" || os.Args[1] == \"--list\" {\n\t\t\t\tfmt.Println(\"Language List 📝\")\n\t\t\t\tfmt.Println(\"===============\")\n\n\t\t\t\tfor _, lang := range langList {\n\t\t\t\t\tfmt.Println(lang)\n\t\t\t\t}\n\t\t\t\tcli.Exit(\"\", 0)\n\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\terr := app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func NewCli(version string) *cli.App {\n\tapp := cli.NewApp()\n\tapp.Name = \"Convoy\"\n\tapp.Version = version\n\tapp.Author = \"Sheng Yang <[email protected]>\"\n\tapp.Usage = \"A volume manager capable of snapshot and delta backup\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"socket, s\",\n\t\t\tValue: \"/var/run/convoy/convoy.sock\",\n\t\t\tUsage: \"Specify unix domain socket for communication between server and client\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug, d\",\n\t\t\tUsage: \"Enable debug level log with client or not\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"verbose\",\n\t\t\tUsage: \"Verbose level output for client, for create volume/snapshot etc\",\n\t\t},\n\t}\n\tapp.CommandNotFound = cmdNotFound\n\tapp.Before = initClient\n\tapp.Commands = []cli.Command{\n\t\tdaemonCmd,\n\t\tinfoCmd,\n\t\tvolumeCreateCmd,\n\t\tvolumeDeleteCmd,\n\t\tvolumeMountCmd,\n\t\tvolumeUmountCmd,\n\t\tvolumeListCmd,\n\t\tvolumeInspectCmd,\n\t\tsnapshotCmd,\n\t\tbackupCmd,\n\t}\n\treturn app\n}",
"func Generate() cli.Command {\n\treturn cli.Command{\n\t\tName: \"generate\",\n\t\tUsage: \"Generate JSON evergreen configurations.\",\n\t\tSubcommands: []cli.Command{\n\t\t\tgenerateGolang(),\n\t\t\tgenerateMake(),\n\t\t},\n\t}\n}",
"func main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"gospel\"\n\tapp.Version = \"0.0.1\" // TODO replace with git version\n\tapp.Usage = \"A tool used to gather truths about a system\"\n\n\tapp.Run(os.Args)\n\tg := gospel.New()\n\tl := g.ListTruths()\n\tfor k, x := range l {\n\t\tfmt.Printf(\"%s %s\\n\", k, x.Description())\n\t}\n\tgt := g.GatherTruths()\n\tfor k, v := range gt {\n\t\tfmt.Printf(\"%s=%s\\n\", k, v)\n\t}\n}",
"func main() {\n\tcli.BashCompletionFlag = cli.BoolFlag{\n\t\tName: \"compgen\",\n\t\tHidden: false,\n\t}\n\n\tapp := cli.NewApp()\n\tapp.EnableBashCompletion = true\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"wat\",\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}",
"func NewApp() *cli.App {\n\n\t// Set custom template for command help\n\tcli.CommandHelpTemplate = CommandHelpTemplate\n\n\t// Set a custom version printer\n\tcli.VersionPrinter = func(c *cli.Context) {\n\t\tt := template.Must(template.New(\"version\").Parse(CommandVersionTemplate))\n\t\tif err := t.Execute(os.Stdout, newVersionInfo()); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Name = \"envsnap\"\n\tapp.Usage = \"Generate project-defined snapshots of runtime environments\"\n\tapp.Version = Version\n\tapp.CustomAppHelpTemplate = AppHelpTemplate\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t\tUsage: \"run envsnap with debug logging\",\n\t\t},\n\t}\n\tapp.Before = func(context *cli.Context) error {\n\t\tif context.Bool(\"debug\") {\n\t\t\tlog.SetLevel(log.DebugLevel)\n\t\t} else {\n\t\t\t// If debug logging isn't enabled, set it to panic level to effectively\n\t\t\t// disable logging.\n\t\t\tlog.SetLevel(log.PanicLevel)\n\t\t}\n\t\treturn nil\n\t}\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"init\",\n\t\t\tUsage: \"Initialize a boilerplate .envsnap config\",\n\t\t\tDescription: heredoc.Doc(`\n\t\t\t\tInitialize a boilerplate .envsnap config.\n\n\t\t\t\tThe .envsnap file is a YAML-formatted file used by the envsnap CLI tool to\n\t\t\t\tgenerate a snapshot of the specified environment.\n\n\t\t\t\tThe boilerplate config will contain section comments by default. This\n\t\t\t\tcan be disabled with the '--terse' flag.\n\n\t\t\t\tTo add language-specific sections to the generated config, use the '--lang'\n\t\t\t\tflag, passing to it the language(s), or language shorthand(s), you wish to\n\t\t\t\tinclude.\n\n\t\t\t\tCurrently, the supported languages (shorthands in parentheses) are:\n\t\t\t\t • python (py)\n\t\t\t\t • golang (go)\n\t\t\t\t`,\n\t\t\t),\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"terse, t\",\n\t\t\t\t\tUsage: \"initialize the config without extra comments\",\n\t\t\t\t},\n\t\t\t\tcli.StringSliceFlag{\n\t\t\t\t\tName: \"lang, l\",\n\t\t\t\t\tUsage: \"initialize the config with basic sections for the specified languages\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: commandInit,\n\t\t},\n\t\t{\n\t\t\tName: \"render\",\n\t\t\tUsage: \"Render the environment as specified by the config\",\n\t\t\tDescription: heredoc.Doc(`\n\t\t\t\tThe rendered environment is output to console by default. The '--file' flag can\n\t\t\t\tbe used to write the output to file.\n\n\t\t\t\tThe output format can be set with the '--output' flag. By default, it will render\n\t\t\t\tthe results in markdown format. The allowable output formats are:\n\t\t\t\t • md\t\tMarkdown output (.md)\n\t\t\t\t • txt\t\tPlaintext output (.txt)\n\t\t\t\t • yaml\tYAML output (.yaml)\n\t\t\t\t • json\tJSON output (.json)\n\t\t\t\t`,\n\t\t\t),\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"output, o\",\n\t\t\t\t\tValue: \"md\",\n\t\t\t\t\tUsage: \"specify the output format\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"file, f\",\n\t\t\t\t\tUsage: \"write the output to file\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"quiet, q\",\n\t\t\t\t\tUsage: \"ignore any warnings generated during render\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: commandRender,\n\t\t},\n\t}\n\n\treturn app\n}",
"func newGenerateCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"generate\",\n\t\tAliases: []string{\"gen\"},\n\t\tShort: \"generate paths or JSON/YAML objects from YANG\",\n\t\tPersistentPreRunE: gApp.GeneratePreRunE,\n\t\tRunE: gApp.GenerateRunE,\n\t\tSilenceUsage: true,\n\t}\n\tgApp.InitGenerateFlags(cmd)\n\treturn cmd\n}",
"func Cli() {\n\tCheckConfigurationFolder()\n\tInit()\n\tCheckArguments()\n\tBuildArborescence()\n}",
"func Gen(cfg *config.Config) *cli.Command {\n\treturn &cli.Command{\n\t\tName: \"gen\",\n\t\tUsage: \"Generate required stuff\",\n\t\tSubcommands: GenCommands(cfg),\n\t}\n}",
"func NewApp(fs *flag.FlagSet) (*App, error) {\n\t// if flags weren't given to us, create our own:\n\tif fs == nil {\n\t\tfs = flag.NewFlagSet(\"teleconsole\", flag.ExitOnError)\n\t}\n\t// parse CLI flags\n\tverbose := fs.Bool(\"v\", false, \"\")\n\tverbose2 := fs.Bool(\"vv\", false, \"\")\n\tverbose3 := fs.Bool(\"vvv\", false, \"\")\n\trunCommand := fs.String(\"c\", \"\", \"\")\n\tserverFlag := fs.String(\"s\", \"\", \"\")\n\tinsecure := fs.Bool(\"insecure\", false, \"\")\n\tforwardPorts := fs.String(\"L\", \"\", \"\")\n\tforwardAddr := fs.String(\"f\", \"\", \"\")\n\tidentityFile := fs.String(\"i\", \"\", \"\")\n\n\tfs.Usage = printHelp\n\tfs.Parse(os.Args[1:])\n\tcliArgs := fs.Args()\n\n\t// init logging:\n\tverbosity := 0\n\tif *verbose3 {\n\t\tverbosity = 3\n\t} else if *verbose2 {\n\t\tverbosity = 2\n\t} else if *verbose {\n\t\tverbosity = 1\n\t}\n\tinitLogging(verbosity)\n\n\t// configure teleport internals to use our ping interval.\n\t// IMPORANT: these must be similar for proxies and servers\n\tteleport.SessionRefreshPeriod = SyncRefreshInterval\n\tteleport.ReverseTunnelAgentHeartbeatPeriod = SyncRefreshInterval * 2\n\tteleport.ServerHeartbeatTTL = SyncRefreshInterval * 2\n\n\t// this disables costly Teleport \"key pool\"\n\tnative.PrecalculatedKeysNum = 0\n\n\t// read configuration from rcfile in ~/\n\tconfig, err := conf.Get()\n\tif err != nil {\n\t\tlog.Fatal(\"Configuration error: \", err)\n\t}\n\t// apply CLI flags to the config:\n\tif *serverFlag != \"\" {\n\t\tif err = config.SetEndpointHost(*serverFlag); err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t}\n\n\t// parse -L flag spec (forwarded ports)\n\tif *forwardPorts != \"\" {\n\t\tconfig.ForwardPorts, err = client.ParsePortForwardSpec([]string{*forwardPorts})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif *forwardAddr != \"\" {\n\t\tconfig.ForwardPort, err = lib.ParseForwardAddr(*forwardAddr)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Errorf(\"Invalid forwarding addres spec: %v\\nExamples: localhost:5000 or http://gravitational.com\", err)\n\t\t}\n\t}\n\t// identity file:\n\tconfig.IdentityFile = *identityFile\n\n\tconfig.Verbosity = verbosity\n\tconfig.RunCommand = *runCommand\n\tconfig.Args = cliArgs\n\tconfig.InsecureHTTPS = *insecure\n\n\treturn &App{\n\t\tArgs: cliArgs,\n\t\tconf: config,\n\t\tclient: NewAPIClient(config, version.Version),\n\t}, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Init returns an initialised Mongo object encapsulating a connection to the mongo server/cluster with the given configuration, a health client to check the health of the mongo server/cluster, and a lock client | func (m *Mongo) Init(ctx context.Context) (err error) {
m.Connection, err = mongodriver.Open(&m.MongoDriverConfig)
if err != nil {
return err
}
databaseCollectionBuilder := map[mongohealth.Database][]mongohealth.Collection{
mongohealth.Database(m.Database): {
mongohealth.Collection(m.ActualCollectionName(config.DatasetsCollection)),
mongohealth.Collection(m.ActualCollectionName(config.EditionsCollection)),
mongohealth.Collection(m.ActualCollectionName(config.InstanceCollection)),
mongohealth.Collection(m.ActualCollectionName(config.DimensionOptionsCollection)),
mongohealth.Collection(m.ActualCollectionName(config.InstanceLockCollection)),
},
}
m.healthClient = mongohealth.NewClientWithCollections(m.Connection, databaseCollectionBuilder)
m.lockClient = mongolock.New(ctx, m.Connection, m.ActualCollectionName(config.InstanceCollection))
return nil
} | [
"func (m *MongoConnect) Init() error {\n\tif m.client != nil {\n\t\treturn nil\n\t}\n\topts := options.Client().ApplyURI(m.url)\n\tif m.poolOpts != nil {\n\t\tif m.poolOpts.ConnectTimeout > 0 {\n\t\t\topts.SetConnectTimeout(m.poolOpts.ConnectTimeout)\n\t\t}\n\t\tif m.poolOpts.SocketTimeout > 0 {\n\t\t\topts.SetSocketTimeout(m.poolOpts.SocketTimeout)\n\t\t}\n\t\tif m.poolOpts.ServerSelectionTimeout > 0 {\n\t\t\topts.SetServerSelectionTimeout(m.poolOpts.ServerSelectionTimeout)\n\t\t}\n\n\t\tif m.poolOpts.MaxPoolSize > 0 {\n\t\t\topts.SetMaxPoolSize(uint64(m.poolOpts.MaxPoolSize))\n\t\t}\n\t\tif m.poolOpts.MinPoolSize > 0 {\n\t\t\topts.SetMinPoolSize(uint64(m.poolOpts.MinPoolSize))\n\t\t} else {\n\t\t\topts.SetMinPoolSize(1)\n\t\t}\n\t}\n\tclient, err := mongo.NewClient(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif m.metricsLogger == nil {\n\t\tm.RegisterMetricsLogger(prom.NewMemoryStoreMetricsLogger(1028))\n\t}\n\tm.client = client\n\tm.clientProxy = &MongoClientProxy{Client: client, mc: m}\n\treturn m.client.Connect(m.NewContext())\n}",
"func New(config Config) func() error {\n\tif config.TimeoutConnect == 0 {\n\t\tconfig.TimeoutConnect = defaultTimeoutConnect\n\t}\n\n\tif config.TimeoutDisconnect == 0 {\n\t\tconfig.TimeoutDisconnect = defaultTimeoutDisconnect\n\t}\n\n\tif config.TimeoutPing == 0 {\n\t\tconfig.TimeoutPing = defaultTimeoutPing\n\t}\n\n\treturn func() (checkErr error) {\n\t\tvar ctx context.Context\n\t\tvar cancel context.CancelFunc\n\n\t\tclient, err := mongo.NewClient(options.Client().ApplyURI(config.DSN))\n\t\tif err != nil {\n\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on client creation: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tctx, cancel = context.WithTimeout(context.Background(), config.TimeoutConnect)\n\t\tdefer cancel()\n\n\t\terr = client.Connect(ctx)\n\t\tif err != nil {\n\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on connect: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdefer func() {\n\t\t\tctx, cancel = context.WithTimeout(context.Background(), config.TimeoutDisconnect)\n\t\t\tdefer cancel()\n\n\t\t\t// override checkErr only if there were no other errors\n\t\t\tif err := client.Disconnect(ctx); err != nil && checkErr == nil {\n\t\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on closing connection: %w\", err)\n\t\t\t}\n\t\t}()\n\n\t\tctx, cancel = context.WithTimeout(context.Background(), config.TimeoutPing)\n\t\tdefer cancel()\n\n\t\terr = client.Ping(ctx, readpref.Primary())\n\t\tif err != nil {\n\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on ping: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t}\n}",
"func (db *MongoDB) Init() error {\r\n\tdb.Databasename = common.Config.MgDbName\r\n\r\n\t// DialInfo holds options for establishing a session with a MongoDB cluster.\r\n\tdialInfo := &mgo.DialInfo{\r\n\t\tAddrs: []string{common.Config.MgAddrs}, // Get HOST + PORT\r\n\t\tTimeout: 60 * time.Second,\r\n\t\tDatabase: db.Databasename, // Database name\r\n\t\tUsername: common.Config.MgDbUsername, // Username\r\n\t\tPassword: common.Config.MgDbPassword, // Password\r\n\t}\r\n\r\n\t// Create a session which maintains a pool of socket connections\r\n\t// to the DB MongoDB database.\r\n\tvar err error\r\n\tdb.MgDbSession, err = mgo.DialWithInfo(dialInfo)\r\n\r\n\tif err != nil {\r\n\t\tlog.Debug(\"Can't connect to mongo, go error: \", err)\r\n\t\treturn err\r\n\t}\r\n\r\n\treturn err\r\n}",
"func InitialiseMongo() {\n\n\tinfo := &mgo.DialInfo{\n\t\tAddrs: []string{hosts},\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: \"admin\",\n\t\tUsername: username,\n\t\tPassword: password,\n\t}\n\n\tsession, err := mgo.DialWithInfo(info)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmongoConnection.session = session\n}",
"func InitConnection() *mongo.Client {\n\tclient, err := mongo.NewClient(options.Client().ApplyURI(\"mongodb://root:example@localhost\"))\n\t// defer client.Disconnect(context.Background())\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t} else {\n\t\tfmt.Println(\"connected to the db :success\")\n\t}\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\terr = client.Connect(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn client\n}",
"func InitiateMongoClient() *mongo.Client {\n\tvar err error\n\tvar client *mongo.Client\n\turi := \"mongodb://localhost:27017/\"\n\topts := options.Client()\n\topts.ApplyURI(uri)\n\topts.SetMaxPoolSize(5)\n\tif client, err = mongo.Connect(context.Background(), opts); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn client\n}",
"func InitMongo(url string) (*Moncore, error) {\n\n\tclientOptions := options.Client().ApplyURI(url)\n\n\tctx_conn, cnc_conn := DefaultContext()\n\tdefer cnc_conn()\n\tclient, err := mongo.Connect(*ctx_conn, clientOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx_ping, cnc_ping := DefaultContext()\n\tdefer cnc_ping()\n\tping_err := client.Ping(*ctx_ping, nil)\n\tif ping_err != nil {\n\t\treturn nil, ping_err\n\t}\n\n\tPrint(\"MongoDB client connected\")\n\treturn &Moncore{client: client}, nil\n}",
"func InitMongoDB(ctx context.Context) interfaces.MongoDatabase {\n\tdeferFunc := logger.LogWithDefer(\"Load MongoDB connection...\")\n\tdefer deferFunc()\n\n\treturn &mongoInstance{\n\t\tread: ConnectMongoDB(ctx, env.BaseEnv().DbMongoReadHost),\n\t\twrite: ConnectMongoDB(ctx, env.BaseEnv().DbMongoWriteHost),\n\t}\n}",
"func InitMG(dbConStr string) (*mongo.Client, error) {\r\n\tClientMG, err := mongo.NewClient(options.Client().ApplyURI(dbConStr))\r\n\tif err != nil {\r\n\t\treturn ClientMG, err\r\n\t}\r\n\tctxMG, cancel := context.WithTimeout(context.Background(), 20*time.Second)\r\n\tdefer cancel()\r\n\r\n\terr = ClientMG.Connect(ctxMG)\r\n\tif err != nil {\r\n\t\treturn ClientMG, err\r\n\t}\r\n\treturn ClientMG, nil\r\n}",
"func SetupClient() (*mongo.Client, error) {\n c := getMongoClient()\n ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)\n err := c.Ping(ctx, readpref.Primary())\n if err != nil {\n fmt.Println(\"Cannot connect to MongoDB: \", err)\n c.Disconnect(context.TODO())\n }\n return c, err\n}",
"func Mongo(conf MongoConfig) (cl *mongo.Client, err error) {\n\tcl, err = mongo.NewClient(options.Client().ApplyURI(mongoURI(conf.Host, conf.Port)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tif err = cl.Connect(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cl, nil\n}",
"func NewMongo(cfg *config.Config) *MongoService {\r\n\tlock.Lock()\r\n\tdefer lock.Unlock()\r\n\tif inited {\r\n\t\treturn ss\r\n\t}\r\n\tinited = true\r\n\tclientOptions := options.Client().ApplyURI(cfg.Mongo.MongoURL).SetMaxPoolSize(cfg.Mongo.Poolsize)\r\n\tconn, err := mongo.Connect(context.TODO(), clientOptions)\r\n\tif err != nil {\r\n\t\tlog.GetLogger().Error(err)\r\n\t\treturn nil\r\n\t}\r\n\tif err := conn.Ping(context.TODO(), nil); err != nil {\r\n\t\tlog.GetLogger().Error(err)\r\n\t\treturn nil\r\n\t}\r\n\tss = &MongoService{}\r\n\r\n\tss.db = conn.Database(cfg.Mongo.DbName)\r\n\r\n\t//test table exists\r\n\tctx := context.Background()\r\n\tresult, err := ss.db.ListCollectionNames(ctx, bson.M{\"name\": cfg.Mongo.TableName})\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\tif len(result) <= 0 {\r\n\t\tfmt.Printf(\"create document: '%s' in mongo database: '%s'\", cfg.Mongo.TableName, cfg.Mongo.DbName)\r\n\t\tsourceIndex := mongo.IndexModel{\r\n\t\t\tKeys: bson.M{\"source_id\": -1},\r\n\t\t\tOptions: nil,\r\n\t\t}\r\n\t\tctimeIndex := mongo.IndexModel{\r\n\t\t\tKeys: bson.M{\"c_time\": -1},\r\n\t\t\tOptions: nil,\r\n\t\t}\r\n\t\tparentIndex := mongo.IndexModel{\r\n\t\t\tKeys: bson.M{\"parent\": -1},\r\n\t\t\tOptions: nil,\r\n\t\t}\r\n\t\t_, err := ss.db.Collection(cfg.Mongo.TableName).Indexes().CreateMany(ctx, []mongo.IndexModel{sourceIndex, ctimeIndex, parentIndex})\r\n\t\tif err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}\r\n\tss.collection = ss.db.Collection(cfg.Mongo.TableName)\r\n\treturn ss\r\n}",
"func (m *Mongnod) New(context interface{}) (*mgo.Database, *mgo.Session, error) {\n\tkey := m.C.Host + \":\" + m.C.DB\n\n\tmasterListLock.Lock()\n\tms, ok := masterList[key]\n\tmasterListLock.Unlock()\n\n\tif ok {\n\t\tses := ms.Copy()\n\t\treturn ses.DB(m.C.DB), ses, nil\n\t}\n\n\t// If not found, then attemp to connect and add to session master list.\n\t// We need this object to establish a session to our MongoDB.\n\tinfo := mgo.DialInfo{\n\t\tAddrs: []string{m.C.Host},\n\t\tTimeout: 60 * time.Second,\n\t\tDatabase: m.C.AuthDB,\n\t\tUsername: m.C.User,\n\t\tPassword: m.C.Password,\n\t}\n\n\t// Create a session which maintains a pool of socket connections\n\t// to our MongoDB.\n\tses, err := mgo.DialWithInfo(&info)\n\tif err != nil {\n\t\tm.Log.Error(context, \"New\", err, \"Completed\")\n\t\treturn nil, nil, err\n\t}\n\n\tses.SetMode(mgo.Monotonic, true)\n\n\t// Add to master list.\n\tmasterListLock.Lock()\n\tmasterList[key] = ses.Copy()\n\tmasterListLock.Unlock()\n\n\treturn ses.DB(m.C.DB), ses, nil\n}",
"func Constructor(connURI string) (*Store, error) {\n\tmongoConnString, err := connstring.Parse(connURI)\n\tif err != nil {\n\t\tlog.Printf(\"Error parsing mongoDB url: %s\", err)\n\t}\n\n\tclient, err := mongodb.NewClient(options.Client().ApplyURI(connURI))\n\tif err != nil {\n\t\tlog.Printf(\"Error creating mongoDB client: %s\", err)\n\t}\n\n\terr = client.Connect()\n\tif err != nil {\n\t\tlog.Printf(\"Unable to connect to mongoDB: %s\", err)\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"Connected to mongoDB: %s\", mongoConnString.AppName)\n\n\tdb := client.Database(mongoConnString.Database)\n\n\treturn &Store{db: db}, nil\n}",
"func InitConnection() {\n\tconnectionAttempted = true\n\tif variables.DBAddress == \"\" {\n\t\tMongoConn = nil\n\t\tlog.Println(\"No mongo server to connect to\")\n\t\treturn\n\t}\n\n\tconnection, err := mgo.Dial(variables.DBAddress)\n\tif err != nil {\n\t\tconnection = nil\n\t\tlog.Println(\"Could not connect to mongo server\")\n\t\treturn\n\t}\n\n\t// Optional. Switch the connection to a monotonic behavior.\n\tconnection.SetMode(mgo.Monotonic, true)\n\tconnection.SetSafe(&mgo.Safe{})\n\n\tMongoConn = connection\n\treturn\n}",
"func (c *Container) NewClient() (*mongo.Client, error) {\n\n\thost, err := c.Con.Host(c.ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tport, err := c.Con.MappedPort(c.ctx, defaultMappedPort)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclientOptions := options.Client().ApplyURI(fmt.Sprintf(\n\t\t\"mongodb://%s:%s@%s:%d\",\n\t\tc.req.User,\n\t\tc.req.Password,\n\t\thost,\n\t\tport.Int(),\n\t))\n\n\tclient, err := mongo.Connect(c.ctx, clientOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = client.Ping(context.TODO(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}",
"func Open(ctx context.Context, connectionString string, database string) (*mongo.Database, error) {\n\tctx, cancel := context.WithTimeout(ctx, 5*time.Second)\n\tdefer cancel()\n\n\tclient, err := mongo.Connect(ctx, options.Client().ApplyURI(\"mongodb://localhost:27017\"))\n\n\treturn client.Database(database), errors.Wrap(err, \"connecting to database\")\n}",
"func MongoOpen(config MongoConfig) (connect MongoConnect) {\n\tctx, _ := context.WithTimeout(context.Background(), 10*time.Second)\n\tclient, err := mongo.Connect(ctx, options.Client().ApplyURI(fmt.Sprintf(\"mongodb://%s:%s@%s:%d/%s\", config.User, config.Password, config.Host, config.Port, config.DatabaseName)))\n\tif err != nil {\n\t\tlog.Fatalf(\"Couldn't connect to Mongo, err: %v\", err.Error())\n\t\treturn\n\t}\n\tdatabase := client.Database(config.DatabaseName)\n\n\tconnect.Client = client\n\tconnect.Database = database\n\treturn\n}",
"func Connect(ctx context.Context, sensor instana.TracerLogger, opts ...*options.ClientOptions) (*mongo.Client, error) {\n\treturn mongo.Connect(ctx, addInstrumentedCommandMonitor(opts, sensor)...)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
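The negatives in this entry all wrap the same core MongoDB bootstrap: build client options from a URI, connect under a timeout, then ping to prove the connection is usable. A minimal sketch of that shared pattern, assuming the v1 go.mongodb.org/mongo-driver API used throughout the negatives (the URI is a placeholder):

package main

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func main() {
	// Placeholder URI; every negative above parameterizes it differently.
	opts := options.Client().ApplyURI("mongodb://localhost:27017")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client, err := mongo.Connect(ctx, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Disconnect(context.Background())

	// Most of the negatives also ping to confirm the connection actually works.
	if err := client.Ping(ctx, readpref.Primary()); err != nil {
		log.Fatal(err)
	}
	log.Println("MongoDB client connected")
}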
Checker is called by the healthcheck library to check the health state of this mongoDB instance | func (m *Mongo) Checker(ctx context.Context, state *healthcheck.CheckState) error {
return m.healthClient.Checker(ctx, state)
} | [
"func New(config Config) func() error {\n\tif config.TimeoutConnect == 0 {\n\t\tconfig.TimeoutConnect = defaultTimeoutConnect\n\t}\n\n\tif config.TimeoutDisconnect == 0 {\n\t\tconfig.TimeoutDisconnect = defaultTimeoutDisconnect\n\t}\n\n\tif config.TimeoutPing == 0 {\n\t\tconfig.TimeoutPing = defaultTimeoutPing\n\t}\n\n\treturn func() (checkErr error) {\n\t\tvar ctx context.Context\n\t\tvar cancel context.CancelFunc\n\n\t\tclient, err := mongo.NewClient(options.Client().ApplyURI(config.DSN))\n\t\tif err != nil {\n\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on client creation: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tctx, cancel = context.WithTimeout(context.Background(), config.TimeoutConnect)\n\t\tdefer cancel()\n\n\t\terr = client.Connect(ctx)\n\t\tif err != nil {\n\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on connect: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdefer func() {\n\t\t\tctx, cancel = context.WithTimeout(context.Background(), config.TimeoutDisconnect)\n\t\t\tdefer cancel()\n\n\t\t\t// override checkErr only if there were no other errors\n\t\t\tif err := client.Disconnect(ctx); err != nil && checkErr == nil {\n\t\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on closing connection: %w\", err)\n\t\t\t}\n\t\t}()\n\n\t\tctx, cancel = context.WithTimeout(context.Background(), config.TimeoutPing)\n\t\tdefer cancel()\n\n\t\terr = client.Ping(ctx, readpref.Primary())\n\t\tif err != nil {\n\t\t\tcheckErr = fmt.Errorf(\"mongoDB health check failed on ping: %w\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t}\n}",
"func (rs *RedisStore) CheckHealth() error {\n\t_, err := rs.Cmd(\"PING\").Str()\n\n\treturn err\n}",
"func (g gormChecker) Check() healthcheck.Health {\n\tresp := queryResponse{}\n\th := healthcheck.NewHealth()\n\n\tif g.db == nil {\n\t\th.Down().AddInfo(\"error\", \"Empty resource\")\n\t\treturn h\n\t}\n\n\toutput := g.db.Raw(\"SELECT 1 as response\").Scan(&resp)\n\tif output.Error != nil {\n\t\th.Down().AddInfo(\"error\", output.Error.Error())\n\t\treturn h\n\t}\n\n\toutput = g.db.Raw(\"SELECT VERSION() as response\").Scan(&resp)\n\tif output.Error != nil {\n\t\th.Down().AddInfo(\"error\", output.Error.Error())\n\t\treturn h\n\t}\n\n\th.Up().AddInfo(\"version\", resp.Response)\n\treturn h\n}",
"func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\t// Since we're here, we already know that HTTP service is up. Let's just check the state of the boltdb connection\n\tdbUp := DBClient.Check()\n\tif dbUp && isHealthy {\n\t\tdata, _ := json.Marshal(healthCheckResponse{Status: \"UP\"})\n\t\twriteJSONResponse(w, http.StatusOK, data)\n\t} else {\n\t\tdata, _ := json.Marshal(healthCheckResponse{Status: \"Database unaccessible\"})\n\t\twriteJSONResponse(w, http.StatusServiceUnavailable, data)\n\t}\n}",
"func (c *Checker) CheckHealth() error {\n\tselect {\n\tcase <-c.stopped:\n\t\tif !c.healthy {\n\t\t\treturn errors.New(\"ping stopped before becoming healthy\")\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"still pinging database\")\n\t}\n}",
"func CheckStatusDB(ctx *gin.Context) {\n\tif !models.CheckMongoDBConnection() {\n\t\tlog.Fatal(\"Bad DB connection\")\n\t}\n\n\t// If DB connection is OK; pass on to the next-in-chain\n\tctx.Next()\n}",
"func MongoHealthCheck(url string) (string, error) {\n\tclientOptions := options.Client().ApplyURI(url)\n\tvar err error\n\tclient, err := mongo.Connect(context.TODO(), clientOptions)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = client.Ping(context.TODO(), nil)\n\tif err != nil {\n\t\tclient.Disconnect(context.TODO())\n\t\treturn \"\", err\n\t}\n\n\tclient.Disconnect(context.TODO())\n\treturn \"\", nil\n}",
"func CheckingConnection() int {\n\terr := MongoConn.Ping(context.TODO(), nil)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\treturn 1\n}",
"func (s *ServerPool) HealthCheck() {\n\tfor _, s := range s.servers {\n\t\tstatus := \"up\"\n\t\talive := isServerUp(s.URL)\n\t\ts.SetAlive(alive)\n\t\tif !alive {\n\t\t\tstatus = \"down\"\n\t\t}\n\t\tlog.Printf(\"%s [%s]\\n\", s.URL, status)\n\t}\n}",
"func (ih *InstanceHealth) HealthChecker(checkInterval time.Duration) {\n\tfor {\n\t\tif len(ih.instances) > 0 {\n\t\t\tfor host, instance := range ih.instances {\n\t\t\t\tif instance.healthy == true {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tupstream := fmt.Sprintf(\"%s:%d\", instance.host, instance.port)\n\t\t\t\tres, err := http.Get(upstream)\n\t\t\t\tif err == nil && res != nil {\n\t\t\t\t\tinstance.healthy = true\n\t\t\t\t\tih.instances[host] = instance\n\t\t\t\t\tfmt.Printf(\"%s makred as healty\\n\", instance.host)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(checkInterval)\n\t}\n}",
"func (n *NeptuneDriver) Checker(ctx context.Context, state *health.CheckState) error {\n\n\t// Perform healthcheck\n\t_, err := n.Healthcheck()\n\n\t// All errors are mapped to Critical status\n\tif err != nil {\n\t\tstate.Update(health.StatusCritical, err.Error(), 0)\n\t\treturn nil\n\t}\n\n\t// Success healthcheck is mapped to OK status\n\tstate.Update(health.StatusOK, msgHealthy, 0)\n\treturn nil\n}",
"func (s *ServerPool) HealthCheck() {\n\tfor _, b := range s.backends {\n\t\tstatus := \"up\"\n\t\tif isBackendAlive(b.URL) {\n\t\t\tb.SetAlive(true)\n\t\t}else {\n\t\t\tb.SetAlive(false)\n\t\t\tstatus = \"down\"\n\t\t}\n\t\tlog.Printf(\"%s [%s]\\n\", b.URL, status)\n\t}\n}",
"func (db *InfluxDB) Health() error {\n\tif db.client == nil {\n\t\treturn nil\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)\n\tdefer cancel()\n\tresult, err := db.client.Health(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif result.Status != influxdbDomain.HealthCheckStatusPass {\n\t\treturn errors.New(string(result.Status))\n\t}\n\treturn nil\n}",
"func (c *PingChecker) Check() error {\n\treturn c.pinger.Ping()\n}",
"func (p *Pool) CheckHealth() error {\n\tp.closedMu.RLock()\n\tclosed := p.closed\n\tp.closedMu.RUnlock()\n\tif closed {\n\t\treturn errors.New(\"sqlite pool health: closed\")\n\t}\n\n\tselect {\n\tcase <-p.ready:\n\t\tif p.err != nil {\n\t\t\treturn fmt.Errorf(\"sqlite pool health: %w\", p.err)\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"sqlite pool health: not ready\")\n\t}\n}",
"func (s *Service) check(mysqlServerID int, startTime, endTime time.Time, step time.Duration) error {\n\t// init\n\terr := s.init(mysqlServerID, startTime, endTime, step)\n\tif err != nil {\n\t\tupdateErr := s.DASRepo.UpdateOperationStatus(s.OperationInfo.operationID, defaultFailedStatus, err.Error())\n\t\tif updateErr != nil {\n\t\t\tlog.Error(message.NewMessage(msghc.ErrHealthcheckUpdateOperationStatus, updateErr.Error()).Error())\n\t\t}\n\n\t\treturn err\n\t}\n\t// run asynchronously\n\tgo s.Engine.Run()\n\n\treturn nil\n}",
"func (d *IoTDBDataSource) CheckHealth(_ context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {\n\n\tvar status = backend.HealthStatusOk\n\tvar message = \"Data source is working\"\n\n\tloginStatus, err := d.IoTDBLogin(req)\n\n\tif err != nil {\n\t\tlog.DefaultLogger.Error(\"Parsing JSON error\", err)\n\t\tstatus = backend.HealthStatusError\n\t\tmessage = fmt.Sprint(\"Failed to connect to iotdb service.\", err.Error())\n\t} else if loginStatus.Code != 200 {\n\t\tstatus = backend.HealthStatusError\n\t\tmessage = loginStatus.Message\n\t}\n\treturn &backend.CheckHealthResult{\n\t\tStatus: status,\n\t\tMessage: message,\n\t}, nil\n}",
"func checkDB(ctx context.Context, db *db.DB) error {\n\tdbConn := db.Copy()\n\tdefer dbConn.Close()\n\n\treturn dbConn.StatusCheck(ctx)\n}",
"func (n *NeptuneDriver) Healthcheck() (s string, err error) {\n\tif _, err = n.Pool.Get(pingStmt, nil, nil); err != nil {\n\t\treturn serviceName, err\n\t}\n\treturn serviceName, nil\n}",
"func ping(client *mongo.Client, ctx context.Context) error{\n \n // mongo.Client has Ping to ping mongoDB, deadline of\n // the Ping method will be determined by cxt\n // Ping method return error if any occored, then\n // the error can be handled.\n if err := client.Ping(ctx, readpref.Primary()); err != nil {\n return err\n }\n fmt.Println(\"connected successfully\")\n return nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
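The document's Checker only delegates to an embedded health client, so the interesting behaviour lives behind that call. A sketch of what such a checker typically does, following the NeptuneDriver negative in this entry (an error maps to a critical status, success to OK); the import path and the Pinger interface are assumptions, and CheckState.Update's (status, message, statusCode) shape is taken from that negative:

package mongohealth

import (
	"context"

	// Assumed import path; the entry only shows the package name "healthcheck".
	"github.com/ONSdigital/dp-healthcheck/healthcheck"
)

// Pinger is a hypothetical stand-in for whatever the wrapped health client pings.
type Pinger interface {
	Ping(ctx context.Context) error
}

type checker struct {
	pinger Pinger
}

// Checker maps a ping failure to a critical status and success to OK,
// mirroring the NeptuneDriver negative in this entry.
func (c *checker) Checker(ctx context.Context, state *healthcheck.CheckState) error {
	if err := c.pinger.Ping(ctx); err != nil {
		state.Update(healthcheck.StatusCritical, err.Error(), 0)
		return nil
	}
	state.Update(healthcheck.StatusOK, "mongodb is ok", 0)
	return nil
}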
Itoabs returns the absolute value of x. | func Itoabs(x int64) int64 {
if x < 0 {
return -x
}
return x
} | [
"func IAbs(x int) int {\n\tif x >= 0 {\n\t\treturn x\n\t}\n\treturn x * -1\n}",
"func _abs(i numint) numint {\n\tif i < 0 {\n\t\treturn -i\n\t}\n\treturn i\n}",
"func AbsI(i int) int {\n\tif i < 0 {\n\t\treturn i * -1\n\t}\n\treturn i\n}",
"func Ftoabs(x float32) float32 {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}",
"func abs(x Literal) int {\n\tif x < 0 {\n\t\treturn int(-x)\n\t}\n\treturn int(x)\n}",
"func absolute(value int) int {\n\tif value < 0 {\n\t\treturn -value\n\t}\n\treturn value\n}",
"func (i Index) Abs() (absolute Index) {\n\tif i < 0 {\n\t\treturn -i\n\t}\n\n\treturn i\n}",
"func AbsInt(x int) int {\n\tif x >= 0 {\n\t\treturn x\n\t}\n\tif x == minInt {\n\t\tpanic(\"absolute overflows int\")\n\t}\n\treturn -x\n}",
"func Abs(x int16) int16 {\n\tif x < 0 {\n\t\tif -x < 0 {\n\t\t\t// x == MinValue\n\t\t\treturn MaxValue\n\t\t}\n\t\treturn -x\n\t}\n\treturn x\n}",
"func (z *Big) Abs(x *Big) *Big {\n\tif x.form != finite {\n\t\treturn z\n\t}\n\tif x.isCompact() {\n\t\tz.compact = arith.Abs(x.compact)\n\t} else {\n\t\tz.unscaled.Abs(&x.unscaled)\n\t}\n\tz.scale = x.scale\n\tz.form = finite\n\treturn z\n}",
"func (s Ints) Abs() (out Ints) {\n\tfor _, v := range s {\n\t\tout = append(out, int64(math.Abs(float64(v))))\n\t}\n\treturn\n}",
"func AbsInt(a int) int {\n\treturn int(math.Abs(float64(a)))\n}",
"func (i I128) Abs() I128 {\n\tif i.hi&signBit != 0 {\n\t\ti.hi = ^i.hi\n\t\ti.lo = ^(i.lo - 1)\n\t\tif i.lo == 0 { // handle carry\n\t\t\ti.hi++\n\t\t}\n\t}\n\treturn i\n}",
"func filterAbs(ctx Context, val Value, args ...Value) Value {\n\tn := CoerceNumber(val)\n\tif 0 == n {\n\t\treturn n\n\t}\n\treturn math.Abs(n)\n}",
"func Abs(float64) float64",
"func (a Vec) Abs() Vec {\n\treturn Vec{math.Abs(a.X), math.Abs(a.Y)}\n}",
"func (z *Int) CmpAbs(x *Int) int {\n\tx.doinit()\n\tz.doinit()\n\treturn compared(C.mpz_cmpabs(&z.i[0], &x.i[0]))\n}",
"func Abs(f float32) float32 {\n\tif f < 0.0 {\n\t\treturn f * -1.0\n\t}\n\treturn f\n}",
"func (p Progress) Absolute() float32 {\n\tif p.direction == Forward {\n\t\treturn p.Progress()\n\t}\n\treturn 1 - p.Progress()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
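Like most of the negatives here, Itoabs uses plain negation, which cannot produce a positive result for the minimum int64; the int16 Abs negative clamps that case to MaxValue, Itoabs does not. A small runnable illustration (standard library only):

package main

import (
	"fmt"
	"math"
)

func Itoabs(x int64) int64 {
	if x < 0 {
		return -x
	}
	return x
}

func main() {
	fmt.Println(Itoabs(-42)) // 42
	fmt.Println(Itoabs(7))   // 7
	// Two's-complement edge case: -math.MinInt64 overflows back to math.MinInt64,
	// so the result is still negative; the int16 negative above clamps to MaxValue instead.
	fmt.Println(Itoabs(math.MinInt64)) // -9223372036854775808
}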
Ftoabs returns the absolute value of x. | func Ftoabs(x float32) float32 {
if x < 0 {
return -x
}
return x
} | [
"func Itoabs(x int64) int64 {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}",
"func abs(x Literal) int {\n\tif x < 0 {\n\t\treturn int(-x)\n\t}\n\treturn int(x)\n}",
"func Abs(f float32) float32 {\n\tif f < 0.0 {\n\t\treturn f * -1.0\n\t}\n\treturn f\n}",
"func Abs(float64) float64",
"func absolute(value int) int {\n\tif value < 0 {\n\t\treturn -value\n\t}\n\treturn value\n}",
"func _abs(i numint) numint {\n\tif i < 0 {\n\t\treturn -i\n\t}\n\treturn i\n}",
"func (z *Big) Abs(x *Big) *Big {\n\tif x.form != finite {\n\t\treturn z\n\t}\n\tif x.isCompact() {\n\t\tz.compact = arith.Abs(x.compact)\n\t} else {\n\t\tz.unscaled.Abs(&x.unscaled)\n\t}\n\tz.scale = x.scale\n\tz.form = finite\n\treturn z\n}",
"func filterAbs(ctx Context, val Value, args ...Value) Value {\n\tn := CoerceNumber(val)\n\tif 0 == n {\n\t\treturn n\n\t}\n\treturn math.Abs(n)\n}",
"func Abs(x int16) int16 {\n\tif x < 0 {\n\t\tif -x < 0 {\n\t\t\t// x == MinValue\n\t\t\treturn MaxValue\n\t\t}\n\t\treturn -x\n\t}\n\treturn x\n}",
"func IAbs(x int) int {\n\tif x >= 0 {\n\t\treturn x\n\t}\n\treturn x * -1\n}",
"func AbsF64(a float64) float64 {\n\tif a < 0.0 {\n\t\treturn -a\n\t}\n\treturn a\n}",
"func (p Progress) Absolute() float32 {\n\tif p.direction == Forward {\n\t\treturn p.Progress()\n\t}\n\treturn 1 - p.Progress()\n}",
"func (m Mat2x3) Abs() Mat2x3 {\n\treturn Mat2x3{Abs(m[0]), Abs(m[1]), Abs(m[2]), Abs(m[3]), Abs(m[4]), Abs(m[5])}\n}",
"func (m Mat3x2) Abs() Mat3x2 {\n\treturn Mat3x2{Abs(m[0]), Abs(m[1]), Abs(m[2]), Abs(m[3]), Abs(m[4]), Abs(m[5])}\n}",
"func (a *Attenuation) FieldDBToAbs() float64 {\n\treturn math.Pow(10, float64(*a)/20)\n}",
"func (a Vec) Abs() Vec {\n\treturn Vec{math.Abs(a.X), math.Abs(a.Y)}\n}",
"func AbsInt(x int) int {\n\tif x >= 0 {\n\t\treturn x\n\t}\n\tif x == minInt {\n\t\tpanic(\"absolute overflows int\")\n\t}\n\treturn -x\n}",
"func AbsInt(a int) int {\n\treturn int(math.Abs(float64(a)))\n}",
"func (e *ErrDecimal) Abs(d, x *Decimal) *Decimal {\n\tif e.Err() != nil {\n\t\treturn d\n\t}\n\te.update(e.Ctx.Abs(d, x))\n\treturn d\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
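Ftoabs shares the comparison-based approach of the float32 Abs negative, so it differs from math.Abs in one corner: IEEE-754 negative zero is not less than zero, so its sign bit survives, while NaN simply passes through unchanged. A short demonstration (standard library only):

package main

import (
	"fmt"
	"math"
)

func Ftoabs(x float32) float32 {
	if x < 0 {
		return -x
	}
	return x
}

func main() {
	fmt.Println(Ftoabs(-2.5)) // 2.5

	negZero := float32(math.Copysign(0, -1)) // -0
	// -0 is not < 0, so Ftoabs leaves the sign bit set; math.Abs clears it.
	fmt.Println(math.Signbit(float64(Ftoabs(negZero))))   // true
	fmt.Println(math.Signbit(math.Abs(float64(negZero)))) // false
}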
decodeCCQLine decodes a CCQ line into (key string, symbols []string). | func decodeCCQLine(line string) CcqLine {
temp := strings.Split(line, " ")
// clean word slice in case some "" inside
temp = func(a []string) []string {
b := []string{}
for _, w := range a {
if w != "" {
b = append(b, w)
}
}
return b
}(temp)
key := temp[0][1:]
temp = temp[1:]
temp[0] = temp[0][1:]
lastWord := temp[len(temp)-1]
lastWord = lastWord[:len(lastWord)-2]
temp[len(temp)-1] = lastWord
return CcqLine{key, temp}
} | [
"func GetKeyFromString(ns string, recInfoLine string) (*as.Key, error) {\n recInfoList := strings.Split(recInfoLine, FIELD_DEL)\n keyDigest := recInfoList[REC_LINE_OFFSET_DG]\n byteDigest, err := hex.DecodeString(keyDigest)\n PanicOnError(err)\n return as.NewKeyWithDigest(ns, recInfoList[REC_LINE_OFFSET_SET], \"\", byteDigest)\n}",
"func ExtractQnameAndQtypeFromKey(key []byte) (string, uint16) {\n\tres := bytes.Split(key, []byte(\".|\"))\n\treturn string(res[0]), dns.StringToType[string(res[1])]\n}",
"func qparse(q string) *proto.Question {\n\n\tlines := strings.Split(q, \"\\n\")\n\n\t// title\n\tsblmnt := lines[0][0:2]\n\tsection := string(lines[0][2])\n\tseqnum, err := strconv.Atoi(lines[0][3:5])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tseqnumber := int32(seqnum)\n\n\tkeyChoice := lines[0][7:8]\n\n\tchapter := \"\"\n\tif len(lines[0]) > 10 {\n\t\tchapter = lines[0][10:]\n\t}\n\n\t// stem\n\tstem := \"\"\n\ti := 1\n\tfor i = 1; i < len(lines); i++ {\n\t\tif lines[i][0:2] == \"A.\" {\n\t\t\tbreak\n\t\t} else {\n\t\t\tstem += lines[i]\n\t\t\tstem += \" \"\n\t\t}\n\t}\n\tstem = strings.TrimSpace(stem)\n\tfig := checkContaintedFigure(stem)\n\n\t// answers\n\tkey := \"\"\n\tvar distractor []string\n\tfor _, t := range []string{\"B.\", \"C.\", \"D.\", \"~~\"} {\n\t\tans := \"\"\n\t\tfor ; i < len(lines); i++ {\n\t\t\tif lines[i][0:2] == t {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tans += lines[i]\n\t\t\t\tans += \" \"\n\t\t\t}\n\t\t}\n\t\tans = strings.TrimSpace(ans)\n\n\t\tif string(ans[0]) == keyChoice {\n\t\t\tkey = ans[3:]\n\n\t\t} else {\n\t\t\tdistractor = append(distractor, ans[3:])\n\n\t\t}\n\t}\n\n\tres := proto.Question{\n\t\tSubelement: sblmnt,\n\t\tGroup: section,\n\t\tSequence: seqnumber,\n\t\tChapter: chapter,\n\t\tStem: stem,\n\t\tKey: key,\n\t\tDistractors: distractor,\n\t\tFigure: fig,\n\t}\n\n\treturn &res\n}",
"func Decode(msg string) map[string]string {\n\ttable := make(map[string]string)\n\tlines := strings.Split(msg, msgsep)\n\tfor _, line := range lines {\n\t\tsplits := strings.SplitN(line, msgeq, 2)\n\t\tif len(splits) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tk, v := splits[0], splits[1]\n\t\ttable[k] = v\n\t}\n\treturn table\n}",
"func getTranslatedKeyCodes(inputEvents []INPUT_RECORD, escapeSequence []byte) string {\n\tvar buf bytes.Buffer\n\tfor i := 0; i < len(inputEvents); i++ {\n\t\tinput := inputEvents[i]\n\t\tif input.EventType == KEY_EVENT && input.KeyEvent.KeyDown != 0 {\n\t\t\tkeyString := mapKeystokeToTerminalString(&input.KeyEvent, escapeSequence)\n\t\t\tbuf.WriteString(keyString)\n\t\t}\n\t}\n\treturn buf.String()\n}",
"func frame_keys(cName *C.char, cFName *C.char) *C.char {\n\tcollectionName := C.GoString(cName)\n\tframeName := C.GoString(cFName)\n\tkeys := FrameKeys(collectionName, frameName)\n\tsrc, err := json.Marshal(keys)\n\tif err != nil {\n\t\treturn C.CString(\"[]\")\n\t}\n\ttxt := fmt.Sprintf(\"%s\", src)\n\treturn C.CString(txt)\n}",
"func args2Go(args C.SoapySDRKwargs) map[string]string {\n\n\tresults := make(map[string]string, args.size)\n\n\tkeys := (**C.char)(unsafe.Pointer(args.keys))\n\tvals := (**C.char)(unsafe.Pointer(args.vals))\n\n\t// Read all the strings\n\tfor i := 0; i < int(args.size); i++ {\n\t\tkey := (**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(keys)) + uintptr(i)*unsafe.Sizeof(*keys)))\n\t\tval := (**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(vals)) + uintptr(i)*unsafe.Sizeof(*vals)))\n\t\tresults[C.GoString(*key)] = C.GoString(*val)\n\t}\n\n\treturn results\n}",
"func (p *ChannelTracker) getSymbolToPrefixMapping(b *seabird.Bot) (map[rune]rune, bool) {\n\tlogger := b.GetLogger()\n\n\t// Sample: (qaohv)~&@%+\n\tprefix, _ := p.isupport.GetRaw(\"PREFIX\")\n\n\tlogger = logger.WithField(\"prefix\", prefix)\n\n\t// We only care about the symbols\n\ti := strings.IndexByte(prefix, ')')\n\tif len(prefix) == 0 || prefix[0] != '(' || i < 0 {\n\t\tlogger.Warnf(\"Invalid prefix format\")\n\t\treturn nil, false\n\t}\n\n\t// We loop through the string using range so we get bytes, then we throw the\n\t// two results together in the map.\n\tvar symbols []rune // ~&@%+\n\tfor _, r := range prefix[i+1:] {\n\t\tsymbols = append(symbols, r)\n\t}\n\tvar modes []rune // qaohv\n\tfor _, r := range prefix[1:i] {\n\t\tmodes = append(modes, r)\n\t}\n\n\tif len(modes) != len(symbols) {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"modes\": modes,\n\t\t\t\"symbols\": symbols,\n\t\t}).Warnf(\"Mismatched modes and symbols\")\n\n\t\treturn nil, false\n\t}\n\n\tprefixes := make(map[rune]rune)\n\tfor k := range symbols {\n\t\tprefixes[symbols[k]] = modes[k]\n\t}\n\n\treturn prefixes, true\n}",
"func parseRedisCommand(line string) RedisCommand {\n\tparts := [][]byte{}\n\tbs := []byte(line)\n\tinQuotes := false\n\tpart := make([]byte, 0, len(bs))\n\ti := 0\n\tfor i < len(bs) {\n\t\tc := bs[i]\n\t\ti++\n\t\tif inQuotes {\n\t\t\tswitch c {\n\t\t\tcase '\"':\n\t\t\t\tinQuotes = false\n\t\t\t\tnewPart := make([]byte, len(part))\n\t\t\t\tcopy(newPart, part)\n\t\t\t\tparts = append(parts, newPart)\n\t\t\tcase '\\\\':\n\t\t\t\tc := bs[i]\n\t\t\t\ti++\n\t\t\t\tswitch c {\n\t\t\t\tcase 'x':\n\t\t\t\t\thexNum := bs[i : i+2]\n\t\t\t\t\tord, err := strconv.ParseInt(string(hexNum), 16, 16)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"BAD HEX NUMBER %s: %v\", string(hexNum), err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tpart = append(part, byte(ord))\n\t\t\t\t\t}\n\t\t\t\t\ti += 2\n\t\t\t\tcase '\"', '\\\\':\n\t\t\t\t\tpart = append(part, c)\n\t\t\t\tcase 'r':\n\t\t\t\t\tpart = append(part, 13)\n\t\t\t\tcase 'a':\n\t\t\t\t\tpart = append(part, 7)\n\t\t\t\tcase 'b':\n\t\t\t\t\tpart = append(part, 8)\n\t\t\t\tcase 'n':\n\t\t\t\t\tpart = append(part, 10)\n\t\t\t\tcase 't':\n\t\t\t\t\tpart = append(part, 9)\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"UNEXPECTED ESCAPED CHAR %v %s\", c, string([]byte{c}))\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\tpart = append(part, c)\n\t\t\t}\n\t\t} else {\n\t\t\tif c == '\"' {\n\t\t\t\tinQuotes = true\n\t\t\t\tpart = part[0:0]\n\t\t\t}\n\t\t}\n\t}\n\tvar cmd RedisCommand\n\tcmd.Raw = line\n\tif len(parts) < 1 {\n\t\treturn cmd\n\t}\n\tcmd.Name = string(parts[0])\n\tcmd.Args = parts[1:]\n\tcmd.IArgs = make([]interface{}, len(parts))\n\tfor i := range parts {\n\t\tcmd.IArgs[i] = interface{}(parts[i])\n\t}\n\n\treturn cmd\n}",
"func Key(qname string, qtype uint16) []byte {\n\treturn []byte(dns.Fqdn(qname) + \"|\" + dns.TypeToString[qtype])\n}",
"func (c B64Cursor) Key() string {\n\ts := \"\"\n\ti := 0\n\tkeys := make([]string, len(c.keyValues))\n\n\tfor k := range c.keyValues {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\ts += k + \",\"\n\t}\n\n\tif len(s) != 0 {\n\t\ts = s[:len(s)-1]\n\t}\n\n\treturn s\n}",
"func (k Key) Code() string {\n\tbuf := bytes.NewBuffer(nil)\n\tbuf.WriteString(\"[]byte{\")\n\tfor i, b := range k {\n\t\tif i > 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tbuf.WriteString(strconv.Itoa(int(b)))\n\t}\n\tbuf.WriteString(\"}\")\n\treturn buf.String()\n}",
"func ReadSymbol(src []byte) (symbol string, ok bool) { return readstring(src) }",
"func DecodeQuote(in []byte) ([]byte, uint16, uint16, []byte, error) {\n\tvar empty []byte\n\tvar buf []byte\n\tvar attest []byte\n\tvar signature []byte\n\tvar s1 uint16\n\tvar s2 uint16\n\n\ttemplate := []interface{}{&empty, &buf}\n\terr := unpack(in, template)\n\tif err != nil {\n\t\treturn nil, 0, 0, nil, errors.New(\"Can't decode Quote response\")\n\t}\n\n\ttemplate = []interface{}{&attest, &s1, &s2, &signature}\n\terr = unpack(buf, template)\n\tif err != nil {\n\t\treturn nil, 0, 0, nil, errors.New(\"Can't decode Quote response\")\n\t}\n\treturn attest, s1, s2, signature, nil\n}",
"func (s SymmetricKey) Octets() []byte {\n\treturn s.Key\n}",
"func DecodeKey(suite kyber.Group, X kyber.Point, Cs []kyber.Point, XhatEnc kyber.Point,\n\txc kyber.Scalar) (key []byte, err error) {\n\tlog.LLvl4(\"xc:\", xc)\n\txcInv := suite.Scalar().Neg(xc)\n\tlog.LLvl4(\"xcInv:\", xcInv)\n\tsum := suite.Scalar().Add(xc, xcInv)\n\tlog.LLvl4(\"xc + xcInv:\", sum, \"::\", xc)\n\tlog.LLvl4(\"X:\", X)\n\tXhatDec := suite.Point().Mul(xcInv, X)\n\tlog.LLvl4(\"XhatDec:\", XhatDec)\n\tlog.LLvl4(\"XhatEnc:\", XhatEnc)\n\tXhat := suite.Point().Add(XhatEnc, XhatDec)\n\tlog.LLvl4(\"Xhat:\", Xhat)\n\tXhatInv := suite.Point().Neg(Xhat)\n\tlog.LLvl4(\"XhatInv:\", XhatInv)\n\n\t// Decrypt Cs to keyPointHat\n\tfor _, C := range Cs {\n\t\tlog.LLvl4(\"C:\", C)\n\t\tkeyPointHat := suite.Point().Add(C, XhatInv)\n\t\tlog.LLvl4(\"keyPointHat:\", keyPointHat)\n\t\tkeyPart, err := keyPointHat.Data()\n\t\tlog.LLvl4(\"keyPart:\", keyPart)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkey = append(key, keyPart...)\n\t}\n\treturn\n}",
"func ReadKey(src []byte) (string, bool) { return readcstring(src) }",
"func key(s string) []byte {\n\tb := []byte(s)\n\tl := len(b)\n\tfor i := 0; i < l/2; i++ {\n\t\tb[i], b[l-1-i] = b[l-1-i], b[i]\n\t}\n\treturn b\n}",
"func HexDecodeKey(str string) ([]byte, error) {\n\treturn hex.DecodeString(strings.ReplaceAll(str, \":\", \"\"))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
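The index arithmetic in decodeCCQLine (strip the leading character of the key and of the first symbol, and the last two characters of the final symbol) suggests input shaped like "(KEY (SYM1 SYM2 SYM3))", though the actual CCQ format is not shown in this entry. A sketch under that assumption; CcqLine is re-declared here only so the fragment compiles on its own:

package main

import (
	"fmt"
	"strings"
)

// CcqLine mirrors the two fields the document's return statement implies.
type CcqLine struct {
	Key     string
	Symbols []string
}

// decodeCCQLine follows the document: split on spaces, drop empty words,
// strip the leading "(" from the key and first symbol, and the trailing "))"
// from the last symbol.
func decodeCCQLine(line string) CcqLine {
	words := []string{}
	for _, w := range strings.Split(line, " ") {
		if w != "" {
			words = append(words, w)
		}
	}
	key := words[0][1:]
	syms := words[1:]
	syms[0] = syms[0][1:]
	last := syms[len(syms)-1]
	syms[len(syms)-1] = last[:len(last)-2]
	return CcqLine{Key: key, Symbols: syms}
}

func main() {
	// Hypothetical input; the double space also exercises the empty-word filter.
	fmt.Println(decodeCCQLine("(q0  (a b c))")) // {q0 [a b c]}
}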
sqlQueryInts takes a repository ID, a SQL statement and returns the integers retrieved. | func sqlQueryInts(config *Config, repoID string, stmt string) ([]int, error) {
if db, ok := config.Connections[repoID]; ok {
rows, err := db.Query(stmt)
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
defer rows.Close()
value := 0
values := []int{}
for rows.Next() {
err := rows.Scan(&value)
if err == nil {
values = append(values, value)
} else {
return nil, fmt.Errorf("ERROR: scan error (%q), %s", repoID, err)
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("ERROR: rows error (%q), %s", repoID, err)
}
return values, nil
}
return nil, fmt.Errorf("bad request")
} | [
"func sqlQueryIntIDs(config *Config, repoID string, stmt string, args ...interface{}) ([]int, error) {\n\tif db, ok := config.Connections[repoID]; ok {\n\t\trows, err := db.Query(stmt, args...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\tdefer rows.Close()\n\t\tvalue := 0\n\t\tvalues := []int{}\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&value)\n\t\t\tif (err == nil) && (value > 0) {\n\t\t\t\tvalues = append(values, value)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"ERROR: scan error (%q), %s\", repoID, err)\n\t\t\t}\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: rows error (%q), %s\", repoID, err)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\treturn values, nil\n\t}\n\treturn nil, fmt.Errorf(\"bad request\")\n}",
"func (db *PGX) getQueryInt(sql string, arguments ...interface{}) (result int, err error) {\n\terr = db.Conn.QueryRow(context.Background(), sql, arguments...).Scan(&result)\n\tif err != nil {\n\t\tdb.log.Printf(\"error : getQueryInt(%s) queryRow unexpectedly failed. args : (%v), error : %v\\n\", sql, arguments, err)\n\t\treturn 0, err\n\t}\n\treturn result, err\n}",
"func queryInt(query string) int {\n\tif cursor, err := db.Query(nil, query, make(map[string]interface{})); err != nil {\n\t\tlog.Println(err)\n\t} else {\n\t\tdefer cursor.Close()\n\t\tvar i int\n\t\tfor cursor.HasMore() {\n\t\t\tif _, err = cursor.ReadDocument(nil, &i); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t\treturn i\n\t\t}\n\t}\n\tlog.Panic(\"query was not successful\")\n\treturn 0\n}",
"func sqlQueryStringIDs(config *Config, repoID string, stmt string, args ...interface{}) ([]string, error) {\n\tif db, ok := config.Connections[repoID]; ok {\n\t\trows, err := db.Query(stmt, args...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\tdefer rows.Close()\n\t\tvalue := ``\n\t\tvalues := []string{}\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&value)\n\t\t\tif err == nil {\n\t\t\t\tvalues = append(values, value)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"ERROR: scan error (%q), %q, %s\", repoID, stmt, err)\n\t\t\t}\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: rows error (%q), %s\", repoID, err)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\treturn values, nil\n\t}\n\treturn nil, fmt.Errorf(\"bad request\")\n}",
"func (e *executor) SelectInt(query string, args ...interface{}) (int64, error) {\n\tvar result int64\n\tvar err error\n\n\tjgorp.Trace(e.ctx, e.name, format(query, args), func() error {\n\t\tresult, err = e.inner.SelectInt(query, args...)\n\t\treturn err\n\t})\n\n\treturn result, err\n}",
"func (g *GrabBag) IntSlice(query string) []int {\n\tres := g.Grab(query)\n\tif res != nil {\n\t\treturn res.([]int)\n\t}\n\treturn []int{}\n}",
"func (db *RDBMS) QJsonIntArr(query string, params ...any) []int64 {\n\tstr := db.QStr(query, params...)\n\tax := S.JsonToArr(str)\n\tai := []int64{}\n\tfor _, v := range ax {\n\t\tai = append(ai, X.ToI(v))\n\t}\n\treturn ai\n}",
"func IntArray() error {\n\tdb, _ := sql.Open(\"go_ibm_db\", connStr)\n\tdefer db.Close()\n\tdb.Exec(\"Drop table arr\")\n\t_, err := db.Exec(\"create table arr(var1 int)\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ta := []int{2, 3}\n\tb := []int8{2, 3}\n\tc := []int16{2, 3}\n\td := []int32{2, 3}\n\te := []int64{2, 3}\n\tst, err := db.Prepare(\"Insert into arr values(?)\")\n\tdefer st.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = st.Query(a)\n\tif !strings.Contains(fmt.Sprint(err), \"did not create a result set\") {\n\t\tfmt.Println(\"Error while inserting []int\")\n\t\treturn err\n\t}\n\t_, err = st.Query(b)\n\tif !strings.Contains(fmt.Sprint(err), \"did not create a result set\") {\n\t\tfmt.Println(\"Error while inserting []int8\")\n\t\treturn err\n\t}\n\t_, err = st.Query(c)\n\tif !strings.Contains(fmt.Sprint(err), \"did not create a result set\") {\n\t\tfmt.Println(\"Error while inserting []int16\")\n\t\treturn err\n\t}\n\t_, err = st.Query(d)\n\tif !strings.Contains(fmt.Sprint(err), \"did not create a result set\") {\n\t\tfmt.Println(\"Error while inserting []int32\")\n\t\treturn err\n\t}\n\t_, err = st.Query(e)\n\tif !strings.Contains(fmt.Sprint(err), \"did not create a result set\") {\n\t\tfmt.Println(\"Error while inserting []int64\")\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (d *sqlDialect) literalInt(b sb.SQLBuilder, i int64) {\n\tif b.IsPrepared() {\n\t\td.placeHolderSQL(b, i)\n\t\treturn\n\t}\n\tb.WriteStrings(strconv.FormatInt(i, 10))\n}",
"func (q *Query) WhereInt(index string, condition int, keys ...int) *Query {\n\n\tq.ser.PutVarCUInt(queryCondition).PutVString(index).PutVarCUInt(q.nextOp).PutVarCUInt(condition)\n\tq.nextOp = opAND\n\n\tq.ser.PutVarCUInt(len(keys))\n\tfor _, v := range keys {\n\t\tq.ser.PutVarCUInt(valueInt).PutVarInt(int64(v))\n\t}\n\treturn q\n}",
"func (ch *Channel) QueryInt(query string) (int, error) {\n\treturn ivi.QueryInt(ch.inst, query)\n}",
"func SQLquery(access, q string) (int, []string, error) {\r\n\t//connString := `Server=localhost;Database=master;Trusted_Connection=True;`\r\n\tvar err error\r\n\tdb, err = sql.Open(\"sqlserver\", access)\r\n\tdefer db.Close()\r\n\tif err != nil {\r\n\t\tlog.Fatal(\"Error creating connection pool: \", err.Error())\r\n\t}\r\n\tctx := context.Background()\r\n\terr = db.PingContext(ctx)\r\n\tif err != nil {\r\n\t\tfmt.Println(err)\r\n\t}\r\n\tfmt.Printf(\"Connected!\\n\")\r\n\tcount, result, err := read(q)\r\n\tfmt.Printf(\"Query completed!\\n\")\r\n\treturn count, result, err\r\n}",
"func selectInInt64(b squirrel.SelectBuilder, expr string, values []int64) squirrel.SelectBuilder {\n\tif len(values) == 0 {\n\t\treturn b\n\t}\n\targs := make([]interface{}, len(values))\n\tfor i, val := range values {\n\t\targs[i] = val\n\t}\n\treturn b.Where(expr+\" IN (\"+squirrel.Placeholders(len(args))+\")\", args...)\n}",
"func (d *PsqlDB) QueryIn(sql string, args ...interface{}) (*sql.Rows, error) {\n\tquery, args, err := sqlx.In(sql, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tquery = d.connection.Rebind(query)\n\treturn d.connection.Query(query, args...)\n}",
"func Int32In(vs ...int32) predicate.IssueX1355 {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.IssueX1355(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldInt32), v...))\n\t})\n}",
"func GetIntData(response *bcsmonitor.QueryResponse) int {\n\tif len(response.Data.Result) == 0 {\n\t\treturn 0\n\t}\n\tvalueStr, ok := response.Data.Result[0].Value[1].(string)\n\tif !ok {\n\t\treturn 0\n\t}\n\tvalue, err := strconv.Atoi(valueStr)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn value\n}",
"func (r RankingData) QueryIds() []int {\n\tif r.ids == nil {\n\t\tfor qid := range r.data {\n\t\t\tr.ids = append(r.ids, qid)\n\t\t}\n\t}\n\treturn r.ids\n}",
"func getint64(val *querypb.BindVariable) (iv int64, status int) {\n\tbv, err := sqltypes.BindVariableToValue(val)\n\tif err != nil {\n\t\treturn 0, QROutOfRange\n\t}\n\tv, err := bv.ToCastInt64()\n\tif err != nil {\n\t\treturn 0, QROutOfRange\n\t}\n\treturn v, QROK\n}",
"func (db *RDBMS) QStrIntMap(query string, params ...any) M.SI {\n\tres := M.SI{}\n\trows := db.QAll(query, params...)\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tkey := ``\n\t\tval := int64(0)\n\t\trows.Scan(&key, &val)\n\t\tres[key] = val\n\t}\n\treturn res\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
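sqlQueryInts (and the two variants that follow) only needs a Config whose exported Connections map holds an open *sql.DB per repository ID. A usage sketch under the assumption that it is compiled alongside the document's Config and sqlQueryInts; the driver, DSN and repository ID are placeholders, and the table/column names are borrowed from the IsPublic entry further down:

package main // sketch: assumes the document's Config type and sqlQueryInts live in this package

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // placeholder driver; the IFNULL in the IsPublic entry suggests MySQL
)

func main() {
	// Hypothetical DSN and repository ID.
	db, err := sql.Open("mysql", "user:pass@tcp(localhost:3306)/eprints")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	cfg := &Config{Connections: map[string]*sql.DB{"caltechauthors": db}}

	ids, err := sqlQueryInts(cfg, "caltechauthors", `SELECT eprintid FROM eprint ORDER BY eprintid`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(ids), "eprint ids")
}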
sqlQueryIntIDs takes a repository ID, a SQL statement and applies the args, returning a list of integer IDs or an error. | func sqlQueryIntIDs(config *Config, repoID string, stmt string, args ...interface{}) ([]int, error) {
if db, ok := config.Connections[repoID]; ok {
rows, err := db.Query(stmt, args...)
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
defer rows.Close()
value := 0
values := []int{}
for rows.Next() {
err := rows.Scan(&value)
if (err == nil) && (value > 0) {
values = append(values, value)
} else {
return nil, fmt.Errorf("ERROR: scan error (%q), %s", repoID, err)
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("ERROR: rows error (%q), %s", repoID, err)
}
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
return values, nil
}
return nil, fmt.Errorf("bad request")
} | [
"func sqlQueryInts(config *Config, repoID string, stmt string) ([]int, error) {\n\tif db, ok := config.Connections[repoID]; ok {\n\t\trows, err := db.Query(stmt)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\tdefer rows.Close()\n\t\tvalue := 0\n\t\tvalues := []int{}\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&value)\n\t\t\tif err == nil {\n\t\t\t\tvalues = append(values, value)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"ERROR: scan error (%q), %s\", repoID, err)\n\t\t\t}\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: rows error (%q), %s\", repoID, err)\n\t\t}\n\t\treturn values, nil\n\t}\n\treturn nil, fmt.Errorf(\"bad request\")\n}",
"func sqlQueryStringIDs(config *Config, repoID string, stmt string, args ...interface{}) ([]string, error) {\n\tif db, ok := config.Connections[repoID]; ok {\n\t\trows, err := db.Query(stmt, args...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\tdefer rows.Close()\n\t\tvalue := ``\n\t\tvalues := []string{}\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&value)\n\t\t\tif err == nil {\n\t\t\t\tvalues = append(values, value)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"ERROR: scan error (%q), %q, %s\", repoID, stmt, err)\n\t\t\t}\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: rows error (%q), %s\", repoID, err)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\treturn values, nil\n\t}\n\treturn nil, fmt.Errorf(\"bad request\")\n}",
"func (sciq *StatusCheckInQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := sciq.Select(statuscheckin.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (grq *GithubRepositoryQuery) IDs(ctx context.Context) (ids []int, err error) {\n\tif grq.ctx.Unique == nil && grq.path != nil {\n\t\tgrq.Unique(true)\n\t}\n\tctx = setContextOp(ctx, grq.ctx, \"IDs\")\n\tif err = grq.Select(githubrepository.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (srq *StatusRoomQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := srq.Select(statusroom.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (r RankingData) QueryIds() []int {\n\tif r.ids == nil {\n\t\tfor qid := range r.data {\n\t\t\tr.ids = append(r.ids, qid)\n\t\t}\n\t}\n\treturn r.ids\n}",
"func (irq *InspectionResultQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := irq.Select(inspectionresult.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (db *PGX) getQueryInt(sql string, arguments ...interface{}) (result int, err error) {\n\terr = db.Conn.QueryRow(context.Background(), sql, arguments...).Scan(&result)\n\tif err != nil {\n\t\tdb.log.Printf(\"error : getQueryInt(%s) queryRow unexpectedly failed. args : (%v), error : %v\\n\", sql, arguments, err)\n\t\treturn 0, err\n\t}\n\treturn result, err\n}",
"func (dq *DentalkindQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := dq.Select(dentalkind.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (rq *ResearchQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := rq.Select(research.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (ulq *UrgencyLevelQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := ulq.Select(urgencylevel.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (ibrq *IncidentBasicRuleQuery) IDs(ctx context.Context) ([]uint, error) {\n\tvar ids []uint\n\tif err := ibrq.Select(incidentbasicrule.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (drq *DataRoomQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := drq.Select(dataroom.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (rcq *RecommendationsCategoryQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := rcq.Select(recommendationscategory.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (ahq *AlertHistoryQuery) IDs(ctx context.Context) ([]int64, error) {\n\tvar ids []int64\n\tif err := ahq.Select(alerthistory.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (g *GrabBag) IntSlice(query string) []int {\n\tres := g.Grab(query)\n\tif res != nil {\n\t\treturn res.([]int)\n\t}\n\treturn []int{}\n}",
"func (oq *OfficeroomQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := oq.Select(officeroom.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (oq *OfficeQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := oq.Select(office.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (jmq *JDModelQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := jmq.Select(jdmodel.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
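Compared with sqlQueryInts, sqlQueryIntIDs forwards query arguments and treats any scanned value that is not greater than zero as an error rather than skipping it, so it only suits columns guaranteed to hold positive IDs. A hypothetical wrapper in the same assumed package as the previous sketch:

// listUserEPrintIDs is hypothetical: the userid filter is an assumption, while
// the eprint/eprintid names come from the IsPublic entry below.
func listUserEPrintIDs(cfg *Config, repoID string, userID int) ([]int, error) {
	// Any row whose eprintid scanned as 0 would trip the value > 0 guard and
	// come back as an error, not an empty slot.
	return sqlQueryIntIDs(cfg, repoID,
		`SELECT eprintid FROM eprint WHERE userid = ? ORDER BY eprintid`, userID)
}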
sqlQueryStringIDs takes a repository ID, a SQL statement and applies the args, returning a list of string IDs or an error. | func sqlQueryStringIDs(config *Config, repoID string, stmt string, args ...interface{}) ([]string, error) {
if db, ok := config.Connections[repoID]; ok {
rows, err := db.Query(stmt, args...)
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
defer rows.Close()
value := ``
values := []string{}
for rows.Next() {
err := rows.Scan(&value)
if err == nil {
values = append(values, value)
} else {
return nil, fmt.Errorf("ERROR: scan error (%q), %q, %s", repoID, stmt, err)
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("ERROR: rows error (%q), %s", repoID, err)
}
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
return values, nil
}
return nil, fmt.Errorf("bad request")
} | [
"func sqlQueryIntIDs(config *Config, repoID string, stmt string, args ...interface{}) ([]int, error) {\n\tif db, ok := config.Connections[repoID]; ok {\n\t\trows, err := db.Query(stmt, args...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\tdefer rows.Close()\n\t\tvalue := 0\n\t\tvalues := []int{}\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&value)\n\t\t\tif (err == nil) && (value > 0) {\n\t\t\t\tvalues = append(values, value)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"ERROR: scan error (%q), %s\", repoID, err)\n\t\t\t}\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: rows error (%q), %s\", repoID, err)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\treturn values, nil\n\t}\n\treturn nil, fmt.Errorf(\"bad request\")\n}",
"func sqlQueryInts(config *Config, repoID string, stmt string) ([]int, error) {\n\tif db, ok := config.Connections[repoID]; ok {\n\t\trows, err := db.Query(stmt)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: query error (%q), %s\", repoID, err)\n\t\t}\n\t\tdefer rows.Close()\n\t\tvalue := 0\n\t\tvalues := []int{}\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&value)\n\t\t\tif err == nil {\n\t\t\t\tvalues = append(values, value)\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"ERROR: scan error (%q), %s\", repoID, err)\n\t\t\t}\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ERROR: rows error (%q), %s\", repoID, err)\n\t\t}\n\t\treturn values, nil\n\t}\n\treturn nil, fmt.Errorf(\"bad request\")\n}",
"func AddSQLQueryHandlers(op *parser.OPFactory) {\n\t//these are used to generate the where clause statement section\n\top.Add(\"id\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\")\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s = %s\", name, val)}, nil\n\t})\n\n\top.Add(\"gte\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\").(int)\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s => %d\", name, val)}, nil\n\t})\n\n\top.Add(\"gt\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\").(int)\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s > %d\", name, val)}, nil\n\t})\n\n\top.Add(\"lte\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\").(int)\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s <= %d\", name, val)}, nil\n\t})\n\n\top.Add(\"lt\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\").(int)\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s < %d\", name, val)}, nil\n\t})\n\n\top.Add(\"in\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"range\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tvar inwords []string\n\t\tranges := c.Get(\"range\").([]string)\n\n\t\tfor _, ins := range ranges {\n\t\t\tinwords = append(inwords, fmt.Sprintf(\"{{table}}.%s = %s\", name, ins))\n\t\t}\n\n\t\treturn []string{strings.Join(inwords, \"\\nOR \")}, nil\n\t})\n\n\top.Add(\"is\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\")\n\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s = %s\", name, val)}, nil\n\t})\n\n\top.Add(\"isnot\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"value\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tval := c.Get(\"value\")\n\t\treturn []string{fmt.Sprintf(\"{{table}}.%s != %s\", name, val)}, nil\n\t})\n\n\top.Add(\"range\", func(name string, c parser.Collector) ([]string, error) {\n\n\t\tif !c.Has(\"max\") && !c.Has(\"min\") {\n\t\t\treturn nil, ErrNoValue\n\t\t}\n\n\t\tmax := c.Get(\"max\")\n\t\tmaxso := fmt.Sprintf(\"{{table}}.%s => %d\", name, max)\n\n\t\tmin := c.Get(\"min\")\n\t\tminso := fmt.Sprintf(\"{{table}}.%s <= %d\", name, min)\n\n\t\t// orange := strings.Join([]string{minso, maxso}, \"\\nOR\\n\")\n\n\t\treturn []string{minso, maxso}, nil\n\t})\n}",
"func SqlR(db *sql.DB) func(query string, args ...interface{}) (*ResultSet, error) {\n\treturn (&dbhandler{db: db, stmt: make(map[string]*sql.Stmt)}).sqlr\n}",
"func executeQueryRequestFromStrings(s []string) *command.ExecuteQueryRequest {\n\tstmts := make([]*command.Statement, len(s))\n\tfor i := range s {\n\t\tstmts[i] = &command.Statement{\n\t\t\tSql: s[i],\n\t\t}\n\t}\n\treturn &command.ExecuteQueryRequest{\n\t\tRequest: &command.Request{\n\t\t\tStatements: stmts,\n\t\t\tTransaction: false,\n\t\t},\n\t\tTimings: false,\n\t}\n}",
"func (o *DeleteRulesV1Params) bindParamIds(formats strfmt.Registry) []string {\n\tidsIR := o.Ids\n\n\tvar idsIC []string\n\tfor _, idsIIR := range idsIR { // explode []string\n\n\t\tidsIIV := idsIIR // string as string\n\t\tidsIC = append(idsIC, idsIIV)\n\t}\n\n\t// items.CollectionFormat: \"multi\"\n\tidsIS := swag.JoinByFormat(idsIC, \"multi\")\n\n\treturn idsIS\n}",
"func SQLUpdateArgsString(columns []interface{}) (r string) {\n\tfor i, col := range columns {\n\t\tif i > 0 {\n\t\t\tr += \", \"\n\t\t}\n\t\tr += col.(string) + \"=$\" + strconv.Itoa(i+1)\n\t}\n\treturn\n}",
"func StatementToStringArray(sql *signal.SqlStatement) []string {\n\ts := make([]string, len(sql.GetParameters()))\n\tfor i, p := range sql.GetParameters() {\n\t\tif p.IntegerParameter != nil {\n\t\t\ts[i] = strconv.Itoa(int(*p.IntegerParameter))\n\t\t} else if p.StringParameter != nil {\n\t\t\ts[i] = *p.StringParameter\n\t\t}\n\t}\n\treturn s\n}",
"func (grq *GithubRepositoryQuery) IDs(ctx context.Context) (ids []int, err error) {\n\tif grq.ctx.Unique == nil && grq.path != nil {\n\t\tgrq.Unique(true)\n\t}\n\tctx = setContextOp(ctx, grq.ctx, \"IDs\")\n\tif err = grq.Select(githubrepository.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func SplitSqls(sqls string, separate rune) []string {\n\tsubs := make([]string, 0)\n\n\tinQuoted := false\n\tpos := 0\n\tl := len(sqls)\n\n\tvar runeValue rune\n\tfor i, w := 0, 0; i < l; i += w {\n\t\truneValue, w = utf8.DecodeRuneInString(sqls[i:])\n\n\t\tvar nextRuneValue rune\n\n\t\tnextWidth := 0\n\n\t\tif i+w < l {\n\t\t\tnextRuneValue, nextWidth = utf8.DecodeRuneInString(sqls[i+w:])\n\t\t}\n\n\t\tjumpNext := false\n\n\t\tswitch {\n\t\tcase runeValue == '\\\\':\n\t\t\tjumpNext = true\n\t\tcase runeValue == '\\'':\n\t\t\tif inQuoted && nextWidth > 0 && nextRuneValue == '\\'' {\n\t\t\t\tjumpNext = true // jump escape for literal apostrophe, or single quote\n\t\t\t} else {\n\t\t\t\tinQuoted = !inQuoted\n\t\t\t}\n\t\tcase !inQuoted && runeValue == separate:\n\t\t\tsubs = tryAddSQL(subs, sqls[pos:i])\n\t\t\tpos = i + w\n\t\t}\n\n\t\tif jumpNext {\n\t\t\ti += w + nextWidth\n\t\t}\n\t}\n\n\tif pos < l {\n\t\tsubs = tryAddSQL(subs, sqls[pos:])\n\t}\n\n\treturn subs\n}",
"func SQLInsertArgsString(argc int) (r string) {\n\taccString := func(acc string, val int) string {\n\t\treturn acc + \"$\" + strconv.Itoa(val)\n\t}\n\tif argc < 1 {\n\t\treturn\n\t}\n\tl := make([]int, argc-1)\n\tfor i := range l {\n\t\tr = accString(r, i+1)\n\t\tif argc > 1 {\n\t\t\tr += \", \"\n\t\t}\n\t}\n\tr = accString(r, argc)\n\treturn\n}",
"func (rq *RevisionQuery) IDs(ctx context.Context) ([]string, error) {\n\tvar ids []string\n\tctx = setContextOp(ctx, rq.ctx, \"IDs\")\n\tif err := rq.Select(revision.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (sqlbldr *Builder) SQLargs() []interface{} {\n\treturn sqlbldr.myOrdQueryArgs\n}",
"func convertArguments(v interface{}) ([]*sqlf.Query, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\n\tif conditions, ok := v.([]*sqlf.Query); ok {\n\t\treturn conditions, nil\n\t}\n\n\treturn nil, ErrNotConditions\n}",
"func (sbc *SandboxConn) StringQueries() []string {\n\tresult := make([]string, len(sbc.Queries))\n\tfor i, query := range sbc.Queries {\n\t\tresult[i] = query.Sql\n\t}\n\treturn result\n}",
"func (grq *GithubReleaseQuery) IDs(ctx context.Context) (ids []int, err error) {\n\tif grq.ctx.Unique == nil && grq.path != nil {\n\t\tgrq.Unique(true)\n\t}\n\tctx = setContextOp(ctx, grq.ctx, \"IDs\")\n\tif err = grq.Select(githubrelease.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (r RankingData) QueryIds() []int {\n\tif r.ids == nil {\n\t\tfor qid := range r.data {\n\t\t\tr.ids = append(r.ids, qid)\n\t\t}\n\t}\n\treturn r.ids\n}",
"func RangesToQuery(ranges []Range) string {\n\tvar rs []string\n\tfor _, r := range ranges {\n\t\trs = append(rs, fmt.Sprintf(\"#%d-%d\", r.Begin, r.End))\n\t}\n\treturn strings.Join(rs, \",\")\n}",
"func POSTGRESQL(q *Query) (string, []interface{}) {\n\tb := &sqlBuilder{sprintArg: numberedArg}\n\treturn b.query(q)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
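sqlQueryStringIDs mirrors the integer variant but scans strings and, unlike it, keeps empty values. One more hypothetical wrapper in the same assumed package, using the user/username names that the GetUsernames entry further down works against:

// listUsernamesMatching is hypothetical; GetUsernames (a later entry) calls
// sqlQueryStringIDs with no arguments at all.
func listUsernamesMatching(cfg *Config, repoID, prefix string) ([]string, error) {
	return sqlQueryStringIDs(cfg, repoID,
		`SELECT username FROM user WHERE username LIKE ? ORDER BY username`, prefix+"%")
}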
IsPublic takes an EPrintID and returns true if public, false otherwise. It checks whether an EPrint record "is public". | func IsPublic(config *Config, repoID string, eprintid int) (bool, error) {
if db, ok := config.Connections[repoID]; ok {
stmt := `SELECT IFNULL(eprint_status, '') AS status, IFNULL(metadata_visibility, '') AS visibility FROM eprint WHERE eprintid = ? LIMIT 1`
rows, err := db.Query(stmt, eprintid)
if err != nil {
return false, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
defer rows.Close()
var (
status, visibility string
)
for rows.Next() {
err := rows.Scan(&status, &visibility)
if err != nil {
return false, fmt.Errorf("ERROR: scan error (%q), %q, %s", repoID, stmt, err)
}
}
if err := rows.Err(); err != nil {
return false, fmt.Errorf("ERROR: rows error (%q), %s", repoID, err)
}
if err != nil {
return false, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
return ((status == "archive") && (visibility == "show")), nil
}
return false, fmt.Errorf("bad request")
} | [
"func (c *ExternalService) IsPublic() bool {\n\treturn false\n}",
"func (m *Measurement) IsPublic() bool {\n return m.data.IsPublic\n}",
"func IsPublic(req *http.Request) bool {\n\tif req == nil {\n\t\treturn true\n\t}\n\treturn req.Header.Get(headerXPublic) == \"true\"\n}",
"func (o AccessPointPolicyStatusOutput) IsPublic() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v AccessPointPolicyStatus) *bool { return v.IsPublic }).(pulumi.BoolPtrOutput)\n}",
"func IsPublic(r *http.Request) bool {\n\tif r.Method == \"GET\" && strings.HasPrefix(r.URL.RequestURI(), \"/public/\") {\n\t\treturn true\n\t}\n\treturn false\n}",
"func imageIsPublic(image uint) bool {\n\tvar metadata ImageMetadata\n\tresult := DB.Model(ImageMetadata{}).Where(\"id = ?\", image).First(&metadata)\n\treturn (result.Error == nil) && (!metadata.Private)\n}",
"func (c *Client) IsPublic() bool {\n\treturn c.Public\n}",
"func checkPublic(c *gin.Context) bool {\n\treturn c.Request.Header.Get(\"X-Public\") == \"true\"\n}",
"func (server *Server) IsPublic() bool {\n\t// TODO: Just return the if statement instead of checking then creating a false to pass through\n\t// TODO: don't count entire strings if you just need to check the value of a single index\n\tif len(server.Config.StringValue(\"RegisterName\")) == 0 {\n\t\treturn false\n\t}\n\tif len(server.Config.StringValue(\"RegisterHost\")) == 0 {\n\t\treturn false\n\t}\n\tif len(server.Config.StringValue(\"RegisterPassword\")) == 0 {\n\t\treturn false\n\t}\n\tif len(server.Config.StringValue(\"RegisterWebURL\")) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (p Path) IsPublic() bool {\n\treturn p == \"hush-configuration/salt\" ||\n\t\tp == \"hush-tree-checksum\"\n}",
"func isOpenToPublic(openState state.OpenValue) bool {\n\treturn openState == state.OPEN || openState == state.OPEN_PLUS\n}",
"func (o *SyntheticsGlobalVariable) HasParseTestPublicId() bool {\n\tif o != nil && o.ParseTestPublicId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (m *MessageLinkInfo) GetIsPublic() (value bool) {\n\tif m == nil {\n\t\treturn\n\t}\n\treturn m.IsPublic\n}",
"func IsPublicCmdID(cmdID protocol.PacketType) bool {\n\treturn !IsPrivateCmdID(cmdID)\n}",
"func isPublicPath(Path string) bool {\n\tfor _, path := range PublicPath {\n\t\tif Path == path {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (p *Permission) CheckPublicScope(userId string, scopeType apistructs.ScopeType, scopeId int64) (bool, error) {\n\tswitch scopeType {\n\tcase apistructs.OrgScope:\n\t\torg, err := p.db.GetOrg(scopeId)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn org.IsPublic, nil\n\tcase apistructs.ProjectScope:\n\t\tproject, err := p.db.GetProjectByID(scopeId)\n\t\tif err != nil || !project.IsPublic {\n\t\t\treturn false, err\n\t\t}\n\t\t// check if in upper level\n\t\tmember, err := p.db.GetMemberByScopeAndUserID(userId, apistructs.OrgScope, project.OrgID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(member) > 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\t// if not, check upper level isPublic\n\t\torg, err := p.db.GetOrg(project.OrgID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn org.IsPublic, nil\n\tcase apistructs.AppScope:\n\t\tapp, err := p.db.GetApplicationByID(scopeId)\n\t\tif err != nil || !app.IsPublic {\n\t\t\treturn false, err\n\t\t}\n\t\tmember, err := p.db.GetMemberByScopeAndUserID(userId, apistructs.ProjectScope, app.ProjectID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(member) > 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\tproject, err := p.db.GetProjectByID(app.ProjectID)\n\t\tif err != nil || !project.IsPublic {\n\t\t\treturn false, err\n\t\t}\n\t\tmember, err = p.db.GetMemberByScopeAndUserID(userId, apistructs.OrgScope, project.OrgID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(member) > 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\torg, err := p.db.GetOrg(project.OrgID)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn org.IsPublic, nil\n\t}\n\treturn true, nil\n}",
"func IsPublicMessage(e *irc.Event) bool {\n\tif IsCommandMessage(e) || IsPrivateMessage(e) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (n Name) HasPublicSuffix() bool {\n\treturn len(n.labels) > 1 && n.category != eTLDUndefined\n}",
"func GetPub(r *http.Request) (bool, error) {\n\tval := r.PostFormValue(\"public\")\n\tif val == \"\" {\n\t\t// No public/private variable found\n\t\treturn false, errors.New(\"No public/private value present\")\n\t}\n\tpub, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\tlog.Printf(\"Error when converting public value to boolean: %v\\n\", err)\n\t\treturn false, err\n\t}\n\n\treturn pub, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
EPrint User Info GetUsernames returns a list of all usernames in a repository | func GetUsernames(config *Config, repoID string, userids ...int) ([]string, error) {
stmt := `SELECT username FROM user ORDER BY userid`
return sqlQueryStringIDs(config, repoID, stmt)
} | [
"func GetAllUserNames() []string {\n\treturn usernames\n}",
"func (api *API) getUsernames(ctx *gin.Context) {\n\tusernames, err := api.database.GetAllUsernames()\n\tif api.check(err, ctx) {\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, gr(usernames))\n}",
"func SearchUserNames(search string) ([]string, error) {\n\tusernames := make([]string, 0)\n\trows, err := db.Query(\"SELECT username FROM gowncloud.users WHERE username LIKE $1 || '%'\", search)\n\tif err != nil {\n\t\tlog.Error(\"Failed to search for usernames starting with \", search)\n\t\treturn nil, ErrDB\n\t}\n\tif rows == nil {\n\t\tlog.Error(\"Failed to get usernames\")\n\t\treturn nil, ErrDB\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar username string\n\t\terr = rows.Scan(&username)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error while reading usernames\")\n\t\t\treturn nil, ErrDB\n\t\t}\n\t\tusernames = append(usernames, username)\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\tlog.Error(\"Error while reading the usernames rows\")\n\t\treturn nil, ErrDB\n\t}\n\treturn usernames, nil\n}",
"func GetAllUserName(db *sql.DB) []string {\n\tvar Allusername []string\n\t//db, err := connectdatabase()\n\t// Query all users\n\tif db == nil {\n\n\t\tlog.Print(\"can not connect to database!\")\n\t\treturn nil\n\t}\n\t//defer db.Close()\n\n\trows, err := db.Query(\"select username from USERS\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor rows.Next() {\n\t\tvar username string\n\t\terr := rows.Scan(&username)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tAllusername = append(Allusername, username)\n\t}\n\tdefer rows.Close()\n\treturn Allusername\n}",
"func (g *Group) GetUserNames() []string {\n\tif g == nil || g.UserNames == nil {\n\t\treturn nil\n\t}\n\treturn *g.UserNames\n}",
"func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) {\n\tres := new(tailcfg.C2NSSHUsernamesResponse)\n\tif !b.tailscaleSSHEnabled() {\n\t\treturn res, nil\n\t}\n\n\tmax := 10\n\tif req != nil && req.Max != 0 {\n\t\tmax = req.Max\n\t}\n\n\tadd := func(u string) {\n\t\tif req != nil && req.Exclude[u] {\n\t\t\treturn\n\t\t}\n\t\tswitch u {\n\t\tcase \"nobody\", \"daemon\", \"sync\":\n\t\t\treturn\n\t\t}\n\t\tif slices.Contains(res.Usernames, u) {\n\t\t\treturn\n\t\t}\n\t\tif len(res.Usernames) > max {\n\t\t\t// Enough for a hint.\n\t\t\treturn\n\t\t}\n\t\tres.Usernames = append(res.Usernames, u)\n\t}\n\n\tif opUser := b.operatorUserName(); opUser != \"\" {\n\t\tadd(opUser)\n\t}\n\n\t// Check popular usernames and see if they exist with a real shell.\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tout, err := exec.Command(\"dscl\", \".\", \"list\", \"/Users\").Output()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlineread.Reader(bytes.NewReader(out), func(line []byte) error {\n\t\t\tline = bytes.TrimSpace(line)\n\t\t\tif len(line) == 0 || line[0] == '_' {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tadd(string(line))\n\t\t\treturn nil\n\t\t})\n\tdefault:\n\t\tlineread.File(\"/etc/passwd\", func(line []byte) error {\n\t\t\tline = bytes.TrimSpace(line)\n\t\t\tif len(line) == 0 || line[0] == '#' || line[0] == '_' {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif mem.HasSuffix(mem.B(line), mem.S(\"/nologin\")) ||\n\t\t\t\tmem.HasSuffix(mem.B(line), mem.S(\"/false\")) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tcolon := bytes.IndexByte(line, ':')\n\t\t\tif colon != -1 {\n\t\t\t\tadd(string(line[:colon]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn res, nil\n}",
"func GetUser() []User {\n\treturn userList\n}",
"func GetAllPersonNames(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT CONCAT(%s_family, \"/\", %s_given) AS %s\nFROM eprint_%s\nWHERE (%s_family IS NOT NULL) OR (%s_given IS NOT NULL)\nGROUP BY %s_family, %s_given ORDER BY %s_family, %s_given`,\n\t\tfield, field, field,\n\t\tfield, field, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func (ic IrcContext) UserIDsToNames(userIDs ...string) []string {\n\tvar names []string\n\t// TODO implement using ic.GetUsers() instead\n\tallUsers := ic.GetUsers(true)\n\tusersMap := make(map[string]slack.User, len(allUsers))\n\tfor _, user := range allUsers {\n\t\tusersMap[user.ID] = user\n\t}\n\tfor _, uid := range userIDs {\n\t\tuser, ok := usersMap[uid]\n\t\tif !ok {\n\t\t\tnames = append(names, uid)\n\t\t\tlog.Printf(\"Could not fetch user %s, not in user map\", uid)\n\t\t} else {\n\t\t\tnames = append(names, user.Name)\n\t\t\tlog.Printf(\"Fetched info for user ID %s: %s\", uid, user.Name)\n\t\t}\n\t}\n\treturn names\n}",
"func getAllUsers() []user {\n\treturn userList\n}",
"func (u *UserService) ListByUsernames(usernames []string) ([]*model.User, error) {\n\tusers, appErr := u.api.GetUsersByUsernames(usernames)\n\n\treturn users, normalizeAppErr(appErr)\n}",
"func (repository UserRepository) ListUsers(description string) ([]models.User, error) {\n\n\tdescription = fmt.Sprintf(\"%%%s%%\", description)\n\n\tresultSet, error := repository.db.Query(\n\t\t\"select u.id, u.name, u.nick, u.email, u.created_at from users u where u.name LIKE ? or u.nick LIKE ?\",\n\t\tdescription, description)\n\tif error != nil {\n\t\treturn nil, error\n\t}\n\tdefer resultSet.Close()\n\n\tvar users []models.User\n\n\tfor resultSet.Next() {\n\t\tvar user models.User\n\t\tif error = resultSet.Scan(&user.ID, &user.Name, &user.Nick, &user.Email, &user.CreatedAt, ); error != nil {\n\t\t\treturn nil, error\n\t\t}\n\t\tusers = append(users, user)\n\t}\n\n\treturn users, nil\n}",
"func (u *users) List() ([]string, error) {\n\tres, err := u.c.baseRequest(http.MethodGet, routes.users, nil)\n\t//res, err := c.session.Get(u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar r userListResponse\n\tres.JSON(&r)\n\treturn r.Ocs.Data.Users, nil\n}",
"func userStrings() ([]string, error) {\n\tcmd := exec.Command(\"/usr/bin/getent\", \"passwd\")\n\tres, err := cmd.Output()\n\tif err != nil {\n\t\tfmt.Println(\"Error processing the passwd list\")\n\t\treturn nil, err\n\t}\n\tif len(res) == 0 {\n\t\treturn nil, fmt.Errorf(\"Cannot process empty string\")\n\t}\n\treturn strings.Split(string(res), string('\\n')), nil\n}",
"func getGroupUsers() ([]string, error) {\n\trows, err := db.Query(`SELECT username FROM track WHERE group=$1`, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar result []string\n\tvar user string\n\tfor rows.Next() {\n\t\t//err = rows.Scan(&user.group, &user.username, &user.id,\n\t\t//\t&user.oldAcc, &user.oldPP, &user.oldRank, &user.oldCRank)\n\n\t\terr = rows.Scan(&user)\n\t\tif err == nil {\n\t\t\tresult = append(result, user)\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn result, nil\n}",
"func readUsers(path string) []string {\n\tvar dirNames []string\n\twinBuiltinUsers := []string{\"Default\", \"Default User\", \"Public\", \"All Users\"}\n\tmacBuiltinUsers := []string{\"Guest\", \"Shared\"}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed opening directory: %s\", err)\n\t}\n\tdefer file.Close()\n\tlist, _ := file.Readdirnames(0) // 0 to read all files and folders\n\tfor _, name := range list {\n\t\tinfo, err := os.Stat(filepath.Join(path, name))\n\t\tif err == nil {\n\t\t\tif info.IsDir() {\n\t\t\t\tif isWindows() {\n\t\t\t\t\tif !stringInSlice(name, winBuiltinUsers) {\n\t\t\t\t\t\tdirNames = append(dirNames, name)\n\t\t\t\t\t}\n\t\t\t\t} else if isMacos() {\n\t\t\t\t\tif !stringInSlice(name, macBuiltinUsers) {\n\t\t\t\t\t\tdirNames = append(dirNames, name)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tdirNames = append(dirNames, name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn dirNames\n}",
"func getUsers() []string {\n\tusers := []string{}\n\tfor _, v := range clients {\n\t\tusers = append(users, v)\n\t}\n\n\t// sort the users\n\tsort.Strings(users)\n\n\treturn users\n}",
"func ListUsers(w http.ResponseWriter, r *http.Request) {\n\tqueryValue := strings.ToLower(r.URL.Query().Get(\"user\"))\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tutils.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepo := repositories.NewRepository(db)\n\tusers, err := repo.List(queryValue)\n\tif err != nil {\n\t\tutils.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tutils.JSON(w, http.StatusOK, users)\n}",
"func (s *RepoUserService) List(path string) ([]*api.User, error) {\n\treturn s.Lister.List(path)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetUserID takes a username and returns a list of userids | func GetUserID(config *Config, repoID string, username string) ([]int, error) {
stmt := `SELECT userid FROM user WHERE username = ?`
return sqlQueryIntIDs(config, repoID, stmt, username)
} | [
"func GetUsernames(config *Config, repoID string, userids ...int) ([]string, error) {\n\tstmt := `SELECT username FROM user ORDER BY userid`\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func (ic IrcContext) UserIDsToNames(userIDs ...string) []string {\n\tvar names []string\n\t// TODO implement using ic.GetUsers() instead\n\tallUsers := ic.GetUsers(true)\n\tusersMap := make(map[string]slack.User, len(allUsers))\n\tfor _, user := range allUsers {\n\t\tusersMap[user.ID] = user\n\t}\n\tfor _, uid := range userIDs {\n\t\tuser, ok := usersMap[uid]\n\t\tif !ok {\n\t\t\tnames = append(names, uid)\n\t\t\tlog.Printf(\"Could not fetch user %s, not in user map\", uid)\n\t\t} else {\n\t\t\tnames = append(names, user.Name)\n\t\t\tlog.Printf(\"Fetched info for user ID %s: %s\", uid, user.Name)\n\t\t}\n\t}\n\treturn names\n}",
"func (m *SchedulingGroup) GetUserIds()([]string) {\n val, err := m.GetBackingStore().Get(\"userIds\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}",
"func GetUserID(user string, users []User) (id int, newID bool) {\n\tlastUserID := 0\n\tfor _, v := range users {\n\t\tif v.Name == user {\n\t\t\treturn v.ID, false\n\t\t}\n\t\tif v.ID > lastUserID {\n\t\t\tlastUserID = v.ID\n\t\t}\n\t}\n\treturn lastUserID + 1, true\n}",
"func (manager *UsersManager) UserIDList() []string {\n\ts := set.New()\n\tmanager.EachEntry(func(authyID string, publicKey string) {\n\t\ts.Add(authyID)\n\t})\n\n\treturn set.StringSlice(s)\n}",
"func GetClientUserIds(ctx *context.T, call security.Call) []string {\n\t// If there is no call or context, we must be the user.\n\tif ctx == nil || call == nil {\n\t\treturn []string{ServerUser}\n\t}\n\t// The convention is: the first 3 components of a blessing name are a user name\n\t// if the second component is a single character. Otherwise, use just the first\n\t// component.\n\tvar ids []string\n\trbn, _ := security.RemoteBlessingNames(ctx, call)\n\tfor _, b := range rbn {\n\t\tif c := ParseUserId(b); c != nil {\n\t\t\tids = append(ids, strings.Join(c, security.ChainSeparator))\n\t\t}\n\t}\n\t// If the client has our public key, we assume identity.\n\tif l, r := call.LocalBlessings().PublicKey(), call.RemoteBlessings().PublicKey(); l != nil && reflect.DeepEqual(l, r) {\n\t\tids = append(ids, ServerUser)\n\t}\n\tif len(ids) > 0 {\n\t\treturn ids\n\t}\n\treturn []string{UnauthenticatedUser}\n}",
"func (i *Interactor) getUsersIDs(ml []*Member) []string {\n\tids := make([]string, 0, len(ml))\n\tfor _, m := range ml {\n\t\tids = append(ids, m.UserID)\n\t}\n\treturn ids\n}",
"func UsernameToId(Id_user string) string {\n var userToEval []User\n var err error\n ses, c := accessDB(users_collection)\n defer ses.Close()\n err = c.Find(bson.M{\"username\":Id_user}).All(&userToEval)\n verifyErr(err)\n js, __ := json.Marshal(userToEval)\n verifyErr(__)\n fmt.Printf(\"\\nel usuario es: \\n%s\\n\\n\",js)\n if userToEval != nil {\n return userToEval[0].Id_user\n } else {\n return \"No existe\"\n }\n}",
"func LookUpUserIDByName(userName string) (string, error) {\n\tfor _, item := range users {\n\t\tif item.Name == userName {\n\t\t\treturn item.ID, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"Could not get user by userName: \" + userName)\n}",
"func LookUpUserNameByID(userID string) (string, error) {\n\tfor _, item := range users {\n\t\tif item.ID == userID {\n\t\t\treturn item.Name, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"Could not get user by userID: \" + userID)\n}",
"func getExperiencesByUserID(w http.ResponseWriter, r *http.Request) {\n\tvariables := mux.Vars(r)\n\tuserID := variables[\"id\"]\n\n\texperiences, err := obtainExperiencesFromSliceByUserID(userID)\n\tif err != nil {\n\t\tsendFailResponse(w, experiences)\n\t\treturn\n\t}\n\tsendSuccessResponse(w, experiences)\n}",
"func GetUsernameFromId(user_id string) (username string, err error) {\n\tqry := `SELECT username FROM t_ymux_user WHERE id = $1`\n\tdata := sizlib.SelData(DB, qry, user_id)\n\tif data == nil || len(data) == 0 {\n\t\terr = fmt.Errorf(\"Error(190532): Missing - no databases:\")\n\t\treturn\n\t}\n\tusername = data[0][\"username\"].(string)\n\t//for _, dd := range data {\n\t//\tdb = append(db, dd[\"datname\"].(string))\n\t//}\n\treturn\n}",
"func GetByUserID(this *server.Context) error {\n\tvar (\n\t\tuserid struct {\n\t\t\tUserID uint32 `json:\"userid\"`\n\t\t}\n\t)\n\n\tif err := this.JSONBody(&userid); err != nil {\n\t\tlogger.Error(\"GetByUserID json\", err)\n\t\treturn core.WriteStatusAndDataJSON(this, constants.ErrInvalidParam, nil)\n\t}\n\n\tlist, err := article.ArticleService.GetByUserID(userid.UserID)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(this, constants.ErrMongoDB, nil)\n\t}\n\n\treturn core.WriteStatusAndDataJSON(this, constants.ErrSucceed, list)\n}",
"func (this *Manager) GetUserList(appId uint32) ([]string) {\n this.Ulocker.RLock()\n defer this.Ulocker.RUnlock()\n\n uls := make([]string, 0)\n for _, c := range this.Users {\n if c.AppId == appId {\n uls = append(uls, c.UserId)\n }\n }\n\n log.Printf(\"-DBUG- users count: %d\", len(this.Users))\n\n return uls\n}",
"func (s SqlTeamStore) GetUserTeamIds(userId string, allowFromCache bool) ([]string, error) {\n\tteamIds := []string{}\n\tquery, args, err := s.getQueryBuilder().\n\t\tSelect(\"TeamId\").\n\t\tFrom(\"TeamMembers\").\n\t\tJoin(\"Teams ON TeamMembers.TeamId = Teams.Id\").\n\t\tWhere(sq.Eq{\"TeamMembers.UserId\": userId, \"TeamMembers.DeleteAt\": 0, \"Teams.DeleteAt\": 0}).ToSql()\n\n\tif err != nil {\n\t\treturn []string{}, errors.Wrap(err, \"team_tosql\")\n\t}\n\terr = s.GetReplicaX().Select(&teamIds, query, args...)\n\tif err != nil {\n\t\treturn []string{}, errors.Wrapf(err, \"failed to find TeamMembers with userId=%s\", userId)\n\t}\n\n\treturn teamIds, nil\n}",
"func getUserInfosByRoleID(roleID int) ([]User, error) {\n\tuserIDs := roleUserMap[roleID]\n\tuserList := []User{}\n\tfor _, userID := range userIDs {\n\t\tuser, err := getUserInfo(userID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tuserList = append(userList, user)\n\t}\n\treturn userList, nil\n}",
"func GetUserFromString(username string) Users {\n\tvar dbu []Users\n\tdb.Select(&dbu, db.Where(\"username\", \"=\", username))\n\n\tif len(dbu) > 0 {\n\t\treturn dbu[0]\n\t}\n\n\tdb.Select(&dbu, db.Where(\"lastfm\", \"=\", username))\n\n\tif len(dbu) > 0 {\n\t\treturn dbu[0]\n\t}\n\n\treturn Users{}\n}",
"func (a *AddChatMembersRequest) GetUserIDs() (value []int64) {\n\tif a == nil {\n\t\treturn\n\t}\n\treturn a.UserIDs\n}",
"func GetUserIDsByNames(names []string) []int64 {\n\tids := make([]int64, 0, len(names))\n\tfor _, name := range names {\n\t\tu, err := GetUserByName(name)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tids = append(ids, u.ID)\n\t}\n\treturn ids\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetUserBy takes a field name (e.g. userid, username) and a value, and returns an EPrintUser object. | func GetUserBy(config *Config, repoID string, queryField string, queryValue interface{}) (*EPrintUser, error) {
var (
year, month, day, hour, minute, second int
hideEMail string
)
if db, ok := config.Connections[repoID]; ok {
stmt := fmt.Sprintf(`SELECT userid, username, usertype, IFNULL(name_honourific, '') AS honourific, IFNULL(name_family, '') AS family, IFNULL(name_given, '') AS given, IFNULL(name_lineage, '') AS lineage, IFNULL(email, '') AS email, IFNULL(hideemail, '') AS hideemail, IFNULL(dept, '') AS dept, IFNULL(org, '') AS org, IFNULL(address, '') AS address, IFNULL(country, '') AS country, IFNULL(joined_year, 0) AS joined_year, IFNULL(joined_month, 0) AS joined_month, IFNULL(joined_day, 0) AS joined_day, IFNULL(joined_hour, 0) AS joined_hour, IFNULL(joined_minute, 0) AS joined_minute, IFNULL(joined_second, 0) AS joined_second FROM user WHERE %s = ? LIMIT 1`, queryField)
rows, err := db.Query(stmt, queryValue)
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
defer rows.Close()
// Map values back into our object.
user := new(EPrintUser)
user.Name = new(Name)
for rows.Next() {
err := rows.Scan(&user.UserID, &user.Username, &user.Type,
&user.Name.Honourific, &user.Name.Family,
&user.Name.Given, &user.Name.Lineage,
&user.EMail, &hideEMail,
&user.Dept, &user.Org,
&user.Address, &user.Country,
&year, &month, &day, &hour, &minute, &second)
if err != nil {
return nil, fmt.Errorf("ERROR: scan error (%q), %q, %s", repoID, stmt, err)
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("ERROR: rows error (%q), %s", repoID, err)
}
if err != nil {
return nil, fmt.Errorf("ERROR: query error (%q), %s", repoID, err)
}
user.Joined = fmt.Sprintf(`%04d-%02d-%02d %02d:%02d:%02d`, year, month, day, hour, minute, second)
if strings.ToLower(hideEMail) == "true" {
user.HideEMail = true
} else {
user.HideEMail = false
}
return user, nil
}
return nil, fmt.Errorf(`bad request`)
} | [
"func (s *Store) getUserByField(ctx context.Context, field, value string) (*User, error) {\n\titer := s.Client.Collection(UsersCollection).Where(field, \"==\", value).Limit(1).Documents(ctx)\n\n\tsnapshot, err := iter.Next()\n\tif err == iterator.Done {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuser := &User{}\n\tsnapshot.DataTo(user)\n\n\tif user.Deleted != nil {\n\t\treturn nil, nil\n\t}\n\n\treturn user, nil\n}",
"func (m *mocker) GetByField(ctx context.Context, key string, value interface{}) (users.User, error) {\n\tval := fmt.Sprintf(\"%+s\", value)\n\tfor _, record := range m.db {\n\t\tswitch strings.ToLower(key) {\n\t\tcase \"public_id\":\n\t\t\tif record.PublicID == val {\n\t\t\t\treturn record, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn users.User{}, ErrNotFound\n}",
"func (impl UserDBBackendImpl) GetByField(ctx context.Context, key string, value interface{}) (pkg.User, error) {\n\n\tret1, ret2 := impl.GetByFieldFunc(ctx, key, value)\n\treturn ret1, ret2\n\n}",
"func (u *UserRepo) GetUserByField(field, value string) (*model.User, error) {\n\tvar user model.User\n\terr := u.DB.Model(&user).Where(fmt.Sprintf(\"%v = ?\", field), value).First()\n\n\treturn &user, err\n}",
"func GetUserByParams(ctx *context.Context) *models.User {\n\treturn GetUserByName(ctx, ctx.Params(\":username\"))\n}",
"func (u *Repository) getUserBy(columns map[string]string) (User, error) {\n\tvar user User\n\tvar query string = \"SELECT * FROM `users` WHERE 1=1\"\n\tfor key, value := range columns {\n\t\tquery += \" AND \" + key + \" = '\" + value + \"'\"\n\t}\n\tquery += \" Limit 1;\"\n\terr := u.DB.QueryRow(query).Scan(&user.ID, &user.FirstName, &user.LastName,\n\t\t&user.Image, &user.Cover, &user.Email, &user.Password)\n\tif err != nil {\n\t\treturn User{}, err\n\t}\n\treturn user, nil\n}",
"func (lc *HelperLDAP) UserInfoByField(username string, fieldOfUnicValue string) (map[string]string, liberr.Error) {\n\tvar (\n\t\terr liberr.Error\n\t\tsrc *ldap.SearchResult\n\t\tuserRes map[string]string\n\t)\n\n\tif username, err = lc.getUserName(username); err != nil {\n\t\treturn nil, err\n\t}\n\n\tuserRes = make(map[string]string)\n\tattributes := append(lc.Attributes, \"cn\")\n\n\tsrc, err = lc.runSearch(fmt.Sprintf(lc.config.FilterUser, fieldOfUnicValue, username), attributes)\n\n\tif err != nil {\n\t\treturn userRes, err\n\t}\n\n\tif len(src.Entries) != 1 {\n\t\tif len(src.Entries) > 1 {\n\t\t\treturn userRes, ErrorLDAPUserNotUniq.Error(nil)\n\t\t} else {\n\t\t\treturn userRes, ErrorLDAPUserNotFound.Error(nil)\n\t\t}\n\t}\n\n\tfor _, attr := range attributes {\n\t\tuserRes[attr] = src.Entries[0].GetAttributeValue(attr)\n\t}\n\n\tif _, ok := userRes[\"DN\"]; !ok {\n\t\tuserRes[\"DN\"] = src.Entries[0].DN\n\t}\n\n\tlc.getLogEntry(loglvl.DebugLevel, \"ldap user find success\").FieldAdd(\"ldap.user\", username).FieldAdd(\"ldap.map\", userRes).Log()\n\treturn userRes, nil\n}",
"func (c *Controller) GetUserByID(uid uint64) (user.User, error) {\n\tu := user.User{}\n\n\terr := c.QueryRow(`select * from users where id = $1;`,\n\t\tuid).Scan(&u.ID, &u.Name, &u.Balance)\n\tif err != nil {\n\t\treturn u, err\n\t}\n\n\treturn u, nil\n}",
"func (service *Service) GetUserByEmailOrUsername(request *restful.Request, response *restful.Response) {\n\tidentifier := request.PathParameter(\"identifier\")\n\n\tdbResult, err := service.server.GetUserByEmailOrUsername(identifier)\n\tif err == dao.ErrRecordNotFound {\n\t\trespondErr(response, http.StatusNotFound, messageDatabaseError,\n\t\t\t\"unable to retrieve user\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusNotFound}).\n\t\t\tError(\"Unable to retrieve user:\", err)\n\n\t\treturn\n\t}\n\tif err != nil {\n\t\trespondErr(response, http.StatusInternalServerError, messageDatabaseError,\n\t\t\t\"unable to retrieve user\")\n\n\t\tlogrus.WithFields(logrus.Fields{\"module\": \"service\", \"resp\": http.StatusInternalServerError}).\n\t\t\tError(\"Unable to retrieve user:\", err)\n\n\t\treturn\n\t}\n\n\tresult := &models.GetUserResponse{\n\t\tResult: *dbResult,\n\t}\n\n\twriteResponse(response, http.StatusOK, result)\n}",
"func GetUser(p graphql.ResolveParams) (interface{}, error) {\n\tidQuery, ok := p.Args[\"id\"].(string)\n\tif ok {\n\t\tusr := &models.User{}\n\t\tusr.SetID(idQuery)\n\t\tusr.FetchByID()\n\t\treturn usr, nil\n\t}\n\treturn nil, errors.New(\"User ID not Provided\")\n}",
"func getUser(path, value string) (*User, error) {\n\tctx := context.Background()\n\tclient, err := firestore.NewClient(ctx, projectID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer client.Close()\n\n\tvar user User\n\n\tmatchedUsers, err := client.Collection(\"Users\").\n\t\tWhere(path, \"==\", value).\n\t\tLimit(1).Documents(ctx).GetAll()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif matchedUsers == nil || len(matchedUsers) != 1 {\n\t\treturn nil, fmt.Errorf(\"User where `%v == %v` does not exist\", path, value)\n\t}\n\n\tif err := matchedUsers[0].DataTo(&user); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &user, nil\n}",
"func (users Users) GetByUsernameOrEmail(username, email string) (config.UserMain, string) {\n\n\tlog.Debug(\n\t\t\"Getting User By Username Or Email\",\n\t\tzap.String(\"Username\", username),\n\t\tzap.String(\"Email\", email),\n\t)\n\n\tfilter := users.getFilterByUsernameOrEmail(username, email)\n\tvar user config.UserMain\n\terr := users.mainCollection.FindOne(users.ctx, filter).Decode(&user)\n\n\tif err != nil {\n\t\treturn config.UserMain{}, config.InvalidUsernameOrEmailMsg\n\t}\n\n\tlog.Info(\n\t\t\"Got User By Username Or Email\",\n\t\tzap.String(\"Username\", username),\n\t\tzap.String(\"Email\", email),\n\t\tzap.Any(\"User\", user),\n\t)\n\n\treturn user, \"\"\n}",
"func (svc *userSvcImpl) GetByEmail(email string) (*User, *Error) {\n\tres, err := r.Table(\"Users\").GetAllByIndex(\"email\", email).Run(svc.session)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR 1: \", err)\n\t\treturn nil, NewError(ErrDB, err)\n\t}\n\tif res.IsNil() {\n\t\treturn nil, nil\n\t}\n\n\tuser := &User{}\n\terr = res.One(user)\n\tif err != nil {\n\t\tfmt.Println(\"ERROR 2: \", err)\n\t\treturn nil, NewError(ErrDB, err)\n\t}\n\n\treturn user, nil\n}",
"func (wekan *Wekan) GetUserFromID(ctx context.Context, id UserID) (User, error) {\n\tvar user User\n\terr := wekan.db.Collection(\"users\").FindOne(ctx, bson.M{\n\t\t\"_id\": id,\n\t}).Decode(&user)\n\tif err != nil {\n\t\tif err == mongo.ErrNoDocuments {\n\t\t\treturn User{}, UserNotFoundError{key: string(\"id = \" + id)}\n\t\t}\n\t\treturn User{}, UnexpectedMongoError{err}\n\t}\n\treturn user, nil\n}",
"func GetUser(username string) (user User, err error) {\n\tsearchKey := datastore.NameKey(helpers.USERS, username, nil)\n\tif err = db.Get(ctx, searchKey, &user); err != nil {\n\t\tlog.Printf(\"unable to query user: %s\", username)\n\t\treturn\n\t}\n\treturn\n}",
"func (impl TwoFactorSessionDBBackendImpl) GetByField(ctx context.Context, key string, value interface{}) (pkg.TwoFactorSession, error) {\n\n\tret1, ret2 := impl.GetByFieldFunc(ctx, key, value)\n\treturn ret1, ret2\n\n}",
"func User(v string) predicate.Emp {\n\treturn predicate.Emp(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldUser), v))\n\t})\n}",
"func (s RedisStore) GetUser(id string) *User {\n\tdata, err := s.client.HGetAll(\"goplaxt:user:\" + id).Result()\n\t// FIXME - return err\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tupdated, err := time.Parse(\"01-02-2006\", data[\"updated\"])\n\t// FIXME - return err\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser := User{\n\t\tID: id,\n\t\tUsername: strings.ToLower(data[\"username\"]),\n\t\tAccessToken: data[\"access\"],\n\t\tRefreshToken: data[\"refresh\"],\n\t\tUpdated: updated,\n\t\tstore: s,\n\t}\n\n\treturn &user\n}",
"func (db *Database) GetUserById(id uint) (*model.User, error) {\r\n\tvar user model.User\r\n\r\n\tif err := db.First(&user, id).Error; err != nil {\r\n\t\tif gorm.IsRecordNotFoundError(err) {\r\n\t\t\treturn nil, errors.Wrap(err, \"user does not exist\")\r\n\t\t}\r\n\t\treturn nil, errors.Wrap(err, \"unable to get user\")\r\n\t}\r\n\treturn &user, nil\r\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
EPrint ID Lists GetAllEPrintIDs returns a list of all eprint ids in a repository or an error | func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {
return sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint
ORDER BY date_year DESC, date_month DESC, date_day DESC`)
} | [
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func ImportEPrints(config *Config, repoID string, ds *DataSource, eprints *EPrints) ([]int, error) {\n\tvar importErrors error\n\tids := []int{}\n\n\tif config.Connections == nil {\n\t\treturn nil, fmt.Errorf(`no databases are not configured`)\n\t}\n\t_, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(`%s database connection not configured`, repoID)\n\t}\n\n\t// Check to make sure updates are allowed if non-Zero\n\t// eprint ids present.\n\tfor _, eprint := range eprints.EPrint {\n\t\tif eprint.EPrintID != 0 {\n\t\t\treturn nil, fmt.Errorf(\"create failed eprint id %d in %s\", eprint.EPrintID, repoID)\n\t\t}\n\t\tif eprint.Collection == \"\" && ds.DefaultCollection != \"\" {\n\t\t\teprint.Collection = DefaultCollection\n\t\t}\n\t\tif eprint.IDNumber == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.IDNumber = GenerateIDNumber(eprint)\n\t\t}\n\t\tif eprint.OfficialURL == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.OfficialURL = GenerateOfficialURL(eprint)\n\t\t}\n\t\tif eprint.Rights == \"\" && ds.DefaultRights != \"\" {\n\t\t\teprint.Rights = ds.DefaultRights\n\t\t}\n\t\tif eprint.Refereed == \"\" && eprint.Type == \"article\" &&\n\t\t\tds.DefaultRefereed != \"\" {\n\t\t\teprint.Refereed = ds.DefaultRefereed\n\t\t}\n\t\tif eprint.EPrintStatus == \"\" && ds.DefaultStatus != \"\" {\n\t\t\teprint.EPrintStatus = ds.DefaultStatus\n\t\t}\n\t\tif eprint.Abstract != \"\" && ds.StripTags {\n\t\t\tif cleaner.HasEncodedElements([]byte(eprint.Abstract)) {\n\t\t\t\teprint.Abstract = string(cleaner.StripTags([]byte(eprint.Abstract)))\n\t\t\t}\n\t\t}\n\t}\n\tfor _, eprint := range eprints.EPrint {\n\t\tid, err := SQLCreateEPrint(config, repoID, ds, eprint)\n\t\tif err != nil {\n\t\t\tif importErrors == nil {\n\t\t\t\timportErrors = err\n\t\t\t} else {\n\t\t\t\timportErrors = fmt.Errorf(\"%s; %s\", importErrors, err)\n\t\t\t}\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\treturn ids, importErrors\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id\nWHERE %s_id IS NOT NULL\nGROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func (m MarvelGetCharactersResponse) IdList() []int {\n\tvar list []int\n\tfor _, user := range m.Data.Results {\n\t\tlist = append(list, user.ID)\n\t}\n\treturn list\n}",
"func (_Car *CarCaller) GetEmmisionInspectionsId(opts *bind.CallOpts) ([32]byte, error) {\n\tvar (\n\t\tret0 = new([32]byte)\n\t)\n\tout := ret0\n\terr := _Car.contract.Call(opts, out, \"getEmmisionInspectionsId\")\n\treturn *ret0, err\n}",
"func (oq *OfficeQuery) IDs(ctx context.Context) ([]int, error) {\n\tvar ids []int\n\tif err := oq.Select(office.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (ahq *AlertHistoryQuery) IDs(ctx context.Context) ([]int64, error) {\n\tvar ids []int64\n\tif err := ahq.Select(alerthistory.FieldID).Scan(ctx, &ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ids, nil\n}",
"func (_Car *CarCallerSession) GetEmmisionInspectionsId() ([32]byte, error) {\n\treturn _Car.Contract.GetEmmisionInspectionsId(&_Car.CallOpts)\n}",
"func allTrackIDs(w http.ResponseWriter, r *http.Request) {\n\t// Connects to the database.\n\tdatabase := mongodb.DatabaseInit(Collection)\n\n\t// Gets the Count of Tracks in the DB.\n\tcount, _ := database.GetCount()\n\n\t// Check if there are any tracks in the DB.\n\tif count != 0 {\n\t\t// Slice of ints, to hold the IDs.\n\t\tvar idSlice []int\n\n\t\t// Gets all tracks from the database.\n\t\t// Loops through them, appending their ID to the new slice.\n\t\ttracks, _ := database.FindAll()\n\t\tfor i := 0; i < len(tracks); i++ {\n\t\t\tidSlice = append(idSlice, tracks[i].ID)\n\t\t}\n\n\t\t// Converts the struct to json.\n\t\tjson, err := json.Marshal(idSlice)\n\t\tif err != nil {\n\t\t\t// Sets header status code to 500 \"Internal server error\" and logs the error.\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Fatal(err)\n\t\t} else {\n\t\t\t// Sets header content-type to application/json and status code to 200 (OK).\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\t\t// Returns the array of IDs.\n\t\t\tw.Write([]byte(json))\n\t\t}\n\t} else {\n\t\t// There are no tracks stored in the DB.\n\t\t// Sets header content-type to application/json and status code to 404 (Not found).\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusNotFound)\n\n\t\t// Returns an empty array.\n\t\tw.Write([]byte(\"[]\"))\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllEPrintIDsWithStatus returns a list of all eprint ids in a repository with a given status or returns an error | func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {
return sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)
} | [
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func (u *Users) Status(ctx context.Context, st Status) ([]keys.ID, error) {\n\titer, err := u.ds.DocumentIterator(context.TODO(), indexKID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkids := make([]keys.ID, 0, 100)\n\tfor {\n\t\tdoc, err := iter.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif doc == nil {\n\t\t\tbreak\n\t\t}\n\t\tvar keyDoc keyDocument\n\t\tif err := json.Unmarshal(doc.Data, &keyDoc); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif keyDoc.Result != nil {\n\t\t\tif keyDoc.Result.Status == st {\n\t\t\t\tkids = append(kids, keyDoc.Result.User.KID)\n\t\t\t}\n\t\t}\n\t}\n\titer.Release()\n\n\treturn kids, nil\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func (hdl *DBHandler) GetStatus(info *Status) *[]Status {\n\tret := make([]Status, 0)\n\tquery := hdl.sqlDB\n\tif info.QuestionID > 0 {\n\t\tfmt.Println(\"NOT ZERO\")\n\t\tquery = query.Where(\"question_id=?\", info.QuestionID)\n\t}\n\tif info.UserID > 0 {\n\t\tquery = query.Where(\"user_id=?\", info.UserID)\n\t}\n\tquery.Order(\"ID DESC\").Find(&ret)\n\treturn &ret\n}",
"func (es *EmployeeService) Status(ctx context.Context, id int) (*EmployeeStatusResult, error) {\n\te := &EmployeeStatusResult{}\n\tvals := url.Values{}\n\tvals.Set(\"employeeId\", strconv.Itoa(id))\n\n\tif err := es.client.Request(ctx, http.MethodGet, employeeEndpoint.Action(status).Query(vals), nil, e); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e, nil\n}",
"func GetTestsByStatus(statusList []string) ([]byte, error) {\n\tvar b []byte\n\tvar where string\n\tif len(statusList) == 0 {\n\t\tfmt.Println(\"no status provided in list\")\n\t\treturn b, fmt.Errorf(\"need to provide a status\")\n\t}\n\n\tfor i := range statusList {\n\t\tif i == 0 {\n\t\t\twhere = \"WHERE (t.status_id = (SELECT id from portal.test_status WHERE status=$1)\"\n\t\t\tcontinue\n\t\t}\n\t\twhere = where + fmt.Sprintf(\"\\nOR t.status_id = (SELECT id from portal.test_status WHERE status=$%v)\", i+1)\n\t}\n\twhere = where + \")\"\n\n\tquery := `SELECT t.id, t.name, s.status, r.result, array_agg(l.status), t.description, t.created, t.launched, t.stopped \n\tFROM portal.test t \n\tINNER JOIN portal.test_status s \n\tON t.status_id = s.id\n\tLEFT JOIN portal.test_results r\n\tON t.result_id = r.id\n\tLEFT JOIN portal.tests_labels tl\n\tON t.id = tl.test_id\n\tLEFT JOIN portal.labels l\n\tON l.id = tl.label_id\n\t` + where + `\n\tGROUP BY t.id, t.name, s.status, r.result, t.description, t.created, t.launched, t.stopped \n\tORDER BY t.created DESC\n\t`\n\n\ts := make([]interface{}, len(statusList))\n\tfor i, v := range statusList {\n\t\ts[i] = v\n\t}\n\n\trows, err := db.Query(query, s...)\n\tif err != nil {\n\t\tfmt.Println(\"received err:\", err)\n\t\treturn b, err\n\t}\n\tdefer rows.Close()\n\n\tvar tests []Test\n\tfor rows.Next() {\n\t\tvar id, name, status, result, desc, created, launched, stopped string\n\t\tvar sqlCreated, sqlLaunched, sqlStopped pq.NullTime\n\t\tvar labels []sql.NullString\n\t\tvar sqlResult sql.NullString\n\t\tif err := rows.Scan(&id, &name, &status, &sqlResult, pq.Array(&labels), &desc, &sqlCreated, &sqlLaunched, &sqlStopped); err != nil {\n\t\t\tfmt.Println(\"error scanning:\", err)\n\t\t\treturn b, err\n\t\t}\n\n\t\ttimeFormat := \"2006-01-02T15:04:05Z07:00\"\n\n\t\tif sqlResult.Valid {\n\t\t\tresult = sqlResult.String\n\t\t} else {\n\t\t\tresult = \"-\"\n\t\t}\n\n\t\tif sqlCreated.Valid {\n\t\t\tcreated = sqlCreated.Time.Format(timeFormat)\n\t\t} else {\n\t\t\tcreated = \"-\"\n\t\t}\n\n\t\tif sqlLaunched.Valid {\n\t\t\tlaunched = sqlLaunched.Time.Format(timeFormat)\n\t\t} else {\n\t\t\tlaunched = \"-\"\n\t\t}\n\n\t\tif sqlStopped.Valid {\n\t\t\tstopped = sqlStopped.Time.Format(timeFormat)\n\t\t} else {\n\t\t\tstopped = \"-\"\n\t\t}\n\n\t\ttests = append(tests, Test{id, name, desc, status, nullStringToStringSlice(labels), result, created, launched, stopped, \"\"})\n\t}\n\n\tif len(tests) == 0 {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\tb, err = json.Marshal(tests)\n\tif err != nil {\n\t\tfmt.Println(\"marshal err:\", err)\n\t}\n\n\treturn b, err\n}",
"func (s *Service) ListStatus(envelopeIdsRequest *model.EnvelopeIdsRequest) *ListStatusOp {\n\treturn &ListStatusOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"PUT\",\n\t\tPath: \"envelopes/status\",\n\t\tPayload: envelopeIdsRequest,\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.VersionV21,\n\t}\n}",
"func (m *MockRoomStorage) GetRoomIDsByStatus(ctx context.Context, scheduler string, status game_room.GameRoomStatus) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRoomIDsByStatus\", ctx, scheduler, status)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (s *userRepo) GetUserByStatus(status string) (users []entity.User, restErr resterrors.RestErr) {\n\n\tquery := `\n\t\t\tSELECT \tu.id,\n\t\t\t\t\tu.first_name,\n\t\t\t\t\tu.last_name,\n\t\t\t\t\tu.email,\n\t\t\t\t\tu.status,\n\t\t\t\t\tu.created_at\n\n\t\t\tFROM\tusers \t\tu \n\n\t\t\tWHERE \tu.status \t= ?;`\n\n\tstmt, err := s.db.Prepare(query)\n\tif err != nil {\n\t\terrorCode := \"Error 0003: \"\n\t\tlogger.Error(fmt.Sprintf(\"%sError when trying to prepare the query statement in GetUserByStatus\", errorCode), err)\n\t\treturn nil, resterrors.NewInternalServerError(fmt.Sprintf(\"%sDatabase error\", errorCode))\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.Query(status)\n\tif err != nil {\n\t\terrorCode := \"Error 0004: \"\n\t\tlogger.Error(fmt.Sprintf(\"%sError when trying to execute Query in GetUserByStatus\", errorCode), err)\n\t\treturn nil, resterrors.NewInternalServerError(fmt.Sprintf(\"%sDatabase error\", errorCode))\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar user entity.User\n\t\terr = rows.Scan(\n\t\t\t&user.ID,\n\t\t\t&user.FirstName,\n\t\t\t&user.LastName,\n\t\t\t&user.Email,\n\t\t\t&user.Status,\n\t\t\t&user.CreatedAt,\n\t\t)\n\t\tif err != nil {\n\t\t\terrorCode := \"Error 0005: \"\n\t\t\tlogger.Error(fmt.Sprintf(\"%sError when trying to do For Scan in the Rows GetUserByStatus\", errorCode), err)\n\t\t\treturn nil, mysqlutils.HandleMySQLError(errorCode, err)\n\t\t}\n\n\t\tusers = append(users, user)\n\t}\n\n\tif len(users) == 0 {\n\t\treturn nil, resterrors.NewNotFoundError(fmt.Sprintf(\"No users matching with the status : %s\", status))\n\t}\n\n\treturn users, nil\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func (ctrl *Controller) Status(s EStatus) []*Elevator {\n\tvar elevators []*Elevator\n\tfor _, el := range ctrl.Elevators {\n\t\tif el.Status == s {\n\t\t\televators = append(elevators, el)\n\t\t}\n\t}\n\treturn elevators\n}",
"func (p *PetController) findPetByStatus(c *gin.Context) {\n\tvar finalPets []models.Pet\n\tauthorizedStatuses := map[string]bool{\"sold\": true, \"available\": true, \"pending\": true}\n\n\tstatuses, valid := c.GetQueryArray(\"status\")\n\tif !valid {\n\t\tlog.Print(\"status is empty\")\n\t\tc.JSON(400, gin.H{\"type\": \"error\", \"message\": \"Invalid status value\"})\n\t\treturn\n\t}\n\n\tfor _, status := range statuses {\n\t\tok := authorizedStatuses[status]\n\t\tif !ok {\n\t\t\tlog.Print(\"status is not authorized\")\n\t\t\tc.JSON(400, gin.H{\"type\": \"error\", \"message\": \"Invalid status value\"})\n\t\t\treturn\n\t\t}\n\n\t\tpets, err := p.Repository.FindPetByStatus(status)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to find the pet in the db: %v\", err)\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"type\": \"error\", \"message\": err.Error()})\n\t\t\treturn\n\t\t}\n\n\t\tfinalPets = append(finalPets, *pets...)\n\t}\n\n\tc.JSON(http.StatusOK, finalPets)\n}",
"func StatusGTE(v int8) predicate.Post {\n\treturn predicate.Post(sql.FieldGTE(FieldStatus, v))\n}",
"func (mr *MockRoomStorageMockRecorder) GetRoomIDsByStatus(ctx, scheduler, status interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetRoomIDsByStatus\", reflect.TypeOf((*MockRoomStorage)(nil).GetRoomIDsByStatus), ctx, scheduler, status)\n}",
"func (insorrepo *InspectorRepo) GetInspectionsByInspectorID(ctx context.Context) ([]*model.Inspection, error) {\n\tinspectorID := ctx.Value(\"inspector_id\").(uint)\n\n\tinspections := []*model.Inspection{}\n\n\t// seelcting multiple inspections using the QueryRows Method of golang\n\tcolmns, er := insorrepo.DB.Query(ctx, \"SELECT * FROM inspections WHERE inspector_id=$1\", inspectorID)\n\tif er != nil {\n\t\treturn inspections, er\n\t}\n\tfor colmns.Next() {\n\t\thandbrake := 0\n\t\tsteeringSystem := 0\n\t\tbrakeSystem := 0\n\t\tseatBelt := 0\n\t\tdoorAndWindow := 0\n\t\tdashBoardLight := 0\n\t\twindshield := 0\n\t\tbaggageDoorWindow := 0\n\t\tgearBox := 0\n\t\tshockAbsorber := 0\n\t\tfrontHighAndLowBeamLight := 0\n\t\trearLightAndBrakeLight := 0\n\t\twiperOperation := 0\n\t\tcarHorn := 0\n\t\tsideMirror := 0\n\t\tgeneralBodyCondition := 0\n\t\t// fetch the Inspection From the database\n\t\tinspection := &model.Inspection{}\n\n\t\tif errs := colmns.Scan(\n\t\t\t&(inspection.ID),\n\t\t\t&(inspection.GarageID),\n\t\t\t&(inspection.InspectorID),\n\t\t\t&(inspection.Drivername),\n\t\t\t&(inspection.VehicleModel),\n\t\t\t&(inspection.VehicleYear),\n\t\t\t&(inspection.VehicleMake),\n\t\t\t&(inspection.VehicleColor),\n\t\t\t&(inspection.LicensePlate),\n\t\t\t&(inspection.FrontImage),\n\t\t\t&(inspection.LeftSideImage),\n\t\t\t&(inspection.RightSideImage),\n\t\t\t&(inspection.BackImage),\n\t\t\t&(inspection.SignatureImage),\n\t\t\t&(inspection.VinNumber),\n\t\t\t&handbrake,\n\t\t\t&steeringSystem,\n\t\t\t&brakeSystem,\n\t\t\t&seatBelt,\n\t\t\t&doorAndWindow,\n\t\t\t&dashBoardLight,\n\t\t\t&windshield,\n\t\t\t&baggageDoorWindow,\n\t\t\t&gearBox,\n\t\t\t&shockAbsorber,\n\t\t\t&frontHighAndLowBeamLight,\n\t\t\t&rearLightAndBrakeLight,\n\t\t\t&wiperOperation,\n\t\t\t&carHorn,\n\t\t\t&sideMirror,\n\t\t\t&generalBodyCondition,\n\t\t\t&(inspection.DriverPerformance),\n\t\t\t&(inspection.Balancing),\n\t\t\t&(inspection.Hazard),\n\t\t\t&(inspection.SignalLightUsage),\n\t\t\t&(inspection.Passed)); errs == nil {\n\t\t\tinspection.HandBrake = insorrepo.GetFunctionalityResultByID(ctx, handbrake)\n\t\t\tinspection.SteeringSystem = insorrepo.GetFunctionalityResultByID(ctx, steeringSystem)\n\t\t\tinspection.BrakeSystem = insorrepo.GetFunctionalityResultByID(ctx, brakeSystem)\n\t\t\tinspection.SeatBelt = insorrepo.GetFunctionalityResultByID(ctx, seatBelt)\n\t\t\tinspection.DoorAndWindow = insorrepo.GetFunctionalityResultByID(ctx, doorAndWindow)\n\t\t\tinspection.DashBoardLight = insorrepo.GetFunctionalityResultByID(ctx, dashBoardLight)\n\t\t\tinspection.WindShield = insorrepo.GetFunctionalityResultByID(ctx, windshield)\n\t\t\tinspection.BaggageDoorWindow = insorrepo.GetFunctionalityResultByID(ctx, baggageDoorWindow)\n\t\t\tinspection.GearBox = insorrepo.GetFunctionalityResultByID(ctx, gearBox)\n\t\t\tinspection.ShockAbsorber = insorrepo.GetFunctionalityResultByID(ctx, shockAbsorber)\n\t\t\tinspection.FrontHighAndLowBeamLight = insorrepo.GetFunctionalityResultByID(ctx, frontHighAndLowBeamLight)\n\t\t\tinspection.RearLightAndBrakeLight = insorrepo.GetFunctionalityResultByID(ctx, rearLightAndBrakeLight)\n\t\t\tinspection.WiperOperation = insorrepo.GetFunctionalityResultByID(ctx, wiperOperation)\n\t\t\tinspection.CarHorn = insorrepo.GetFunctionalityResultByID(ctx, carHorn)\n\t\t\tinspection.SideMirrors = insorrepo.GetFunctionalityResultByID(ctx, sideMirror)\n\t\t\tinspection.GeneralBodyCondition = insorrepo.GetFunctionalityResultByID(ctx, generalBodyCondition)\n\t\t\tinspections = append(inspections, 
inspection)\n\t\t}\n\t}\n\treturn inspections, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsInTimestampRange returns a list of EPrintIDs in the created timestamp range or returns an error. field may be either "datestamp" (for the created date) or "lastmod" (for the last modified date) | func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {
stmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE
(CONCAT(%s_year, "-",
LPAD(IFNULL(%s_month, 1), 2, "0"), "-",
LPAD(IFNULL(%s_day, 1), 2, "0"), " ",
LPAD(IFNULL(%s_hour, 0), 2, "0"), ":",
LPAD(IFNULL(%s_minute, 0), 2, "0"), ":",
LPAD(IFNULL(%s_second, 0), 2, "0")) >= ?) AND
(CONCAT(%s_year, "-",
LPAD(IFNULL(%s_month, 12), 2, "0"), "-",
LPAD(IFNULL(%s_day, 28), 2, "0"), " ",
LPAD(IFNULL(%s_hour, 23), 2, "0"), ":",
LPAD(IFNULL(%s_minute, 59), 2, "0"), ":",
LPAD(IFNULL(%s_second, 59), 2, "0")) <= ?)
ORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,
field, field, field, field, field, field, field, field, field, field, field, field,
field, field, field, field, field, field)
return sqlQueryIntIDs(config, repoID, stmt, start, end)
} | [
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func TimestampRange() *TimestampRangeFilter {\r\n\tf := new(TimestampRangeFilter)\r\n\r\n\tf.defaultLeftVal = 0\r\n\tf.defaultRightVal = math.MaxUint32\r\n\r\n\treturn f\r\n}",
"func (s *InMemoryStore) InRange(uid string, ti, tf time.Time) ([]model.DrinkLog, error) {\n\tresult := []model.DrinkLog{}\n\n\tif userlogs, exists := s.drinklogs[uid]; exists {\n\t\t// add logs within time range to result\n\t\tfor _, log := range userlogs {\n\t\t\tif log.Time.Before(tf) && log.Time.After(ti) {\n\t\t\t\tresult = append(result, log)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func (db *Storage) GetRange(path string, from, to int64) ([]Object, error) {\n\tres := []Object{}\n\tnow := time.Now().UnixNano()\n\tif to == 0 {\n\t\tto = now\n\t}\n\ttimeRange := \"WHERE kv.created >= $2 AND kv.created <= $3\"\n\tqry := getQuery() + \" AS kv \" + timeRange + \";\"\n\t// log.Println(qry)\n\trows, err := db.Client.Query(qry, path, nanoTimestampToRFC3339NoTimezone(from), nanoTimestampToRFC3339NoTimezone(to))\n\tif err != nil {\n\t\tlog.Println(\"failed get on sql\", err)\n\t\treturn res, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar entry Entry\n\t\terr = rows.Scan(&entry.Key, &entry.Created, &entry.Updated, &entry.Data)\n\t\tif err != nil {\n\t\t\tlog.Println(\"failed to parse sql entry\", path, err)\n\t\t\tcontinue\n\t\t}\n\t\tupdatedTime := int64(0)\n\t\tif entry.Updated.Valid {\n\t\t\tupdatedTime = entry.Updated.Time.UnixNano()\n\t\t}\n\n\t\tres = append(res, Object{\n\t\t\tCreated: entry.Created.UnixNano(),\n\t\t\tUpdated: updatedTime,\n\t\t\tKey: entry.Key,\n\t\t\tValue: entry.Data,\n\t\t})\n\t}\n\n\treturn res, nil\n}",
"func (a *Repo) TimeRange(ctx context.Context, bundleId int, start, end time.Time) (entities.DboSlice, error) {\n\treturn a.ProducerFunc(\n\t\tctx,\n\t\t\"select * from app_tracking where bundleid = $1 and startat >= $2 and startat <= $3\",\n\t\tbundleId, start, end,\n\t)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func HistoryTimestamps(limit int, skip int) ([]int64, error) {\n\tctx := context.Background()\n\topt := options.Find().\n\t\tSetLimit(int64(limit)).\n\t\tSetSkip(int64(skip)).\n\t\tSetSort(map[string]int{\"start_time\": -1}).\n\t\tSetProjection(map[string]int{\"start_time\": 1})\n\tcur, err := collection().Find(ctx, nil, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer cur.Close(ctx)\n\n\tresult := make([]int64, 0)\n\tfor cur.Next(ctx) {\n\t\tt := &struct {\n\t\t\tStartTime int64 `bson:\"start_time\"`\n\t\t}{}\n\t\tif err := cur.Decode(t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, t.StartTime)\n\t}\n\n\treturn result, nil\n}",
"func (db *Storage) GetNRange(path string, from, to int64, limit int) ([]Object, error) {\n\tres := []Object{}\n\tnow := time.Now().UnixNano()\n\tif to == 0 {\n\t\tto = now\n\t}\n\ttimeRange := \"WHERE kv.created >= $2 AND kv.created <= $3\"\n\tlimitQuery := \"limit $4\"\n\tqry := getQuery() + \" AS kv \" + timeRange + \" \" + limitQuery + \";\"\n\t// log.Println(qry)\n\trows, err := db.Client.Query(qry, path, nanoTimestampToRFC3339NoTimezone(from), nanoTimestampToRFC3339NoTimezone(to), strconv.FormatInt(int64(limit), 10))\n\tif err != nil {\n\t\tlog.Println(\"failed get on sql\", err)\n\t\treturn res, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar entry Entry\n\t\terr = rows.Scan(&entry.Key, &entry.Created, &entry.Updated, &entry.Data)\n\t\tif err != nil {\n\t\t\tlog.Println(\"failed to parse sql entry\", path, err)\n\t\t\tcontinue\n\t\t}\n\t\tupdatedTime := int64(0)\n\t\tif entry.Updated.Valid {\n\t\t\tupdatedTime = entry.Updated.Time.UnixNano()\n\t\t}\n\n\t\tres = append(res, Object{\n\t\t\tCreated: entry.Created.UnixNano(),\n\t\t\tUpdated: updatedTime,\n\t\t\tKey: entry.Key,\n\t\t\tValue: entry.Data,\n\t\t})\n\t}\n\n\treturn res, nil\n}",
"func (_BerryGetters *BerryGettersCaller) GetSubmissionsByTimestamp(opts *bind.CallOpts, _requestId *big.Int, _timestamp *big.Int) ([5]*big.Int, error) {\n\tvar (\n\t\tret0 = new([5]*big.Int)\n\t)\n\tout := ret0\n\terr := _BerryGetters.contract.Call(opts, out, \"getSubmissionsByTimestamp\", _requestId, _timestamp)\n\treturn *ret0, err\n}",
"func (_BerryMaster *BerryMasterCaller) GetSubmissionsByTimestamp(opts *bind.CallOpts, _requestId *big.Int, _timestamp *big.Int) ([5]*big.Int, error) {\n\tvar (\n\t\tret0 = new([5]*big.Int)\n\t)\n\tout := ret0\n\terr := _BerryMaster.contract.Call(opts, out, \"getSubmissionsByTimestamp\", _requestId, _timestamp)\n\treturn *ret0, err\n}",
"func (repo *GormRepository) GetUserStampHistory(userID uuid.UUID, limit int) (h []*UserStampHistory, err error) {\n\th = make([]*UserStampHistory, 0)\n\tif userID == uuid.Nil {\n\t\treturn\n\t}\n\terr = repo.db.\n\t\tTable(\"messages_stamps\").\n\t\tWhere(\"user_id = ?\", userID).\n\t\tGroup(\"stamp_id\").\n\t\tSelect(\"stamp_id, max(updated_at) AS datetime\").\n\t\tOrder(\"datetime DESC\").\n\t\tScopes(gormutil.LimitAndOffset(limit, 0)).\n\t\tScan(&h).\n\t\tError\n\treturn\n}",
"func (s StorageSQLite3) GetEventRowsWithinRange(eventRange int) []EventRow {\n\tdb := getDbConn(s.DbName)\n\tdefer db.Close()\n\n\tquery :=\n\t\t\"SELECT \" + DB_COLUMN_ID + \", \" + DB_COLUMN_EVENT + \" FROM \" + DB_TABLE_NAME + \" \" +\n\t\t\t\"ORDER BY \" + DB_COLUMN_ID + \" DESC LIMIT \" + IntToString(eventRange) + \";\"\n\treturn execGetQuery(db, query)\n}",
"func filterTimestamps(in []int64, delta time.Duration) []int64 {\n\tbefore := time.Now().Add(-delta)\n\tout := make([]int64, 0, len(in))\n\n\tfor _, ts := range in {\n\t\ttimestamp := time.Unix(ts, 0)\n\t\tif timestamp.Before(before) {\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, ts)\n\t}\n\n\treturn out\n}",
"func (db *Storage) GetUpdatedRange(path string, from, to int64) ([]Object, error) {\n\tres := []Object{}\n\tnow := time.Now().UnixNano()\n\tif to == 0 {\n\t\tto = now\n\t}\n\ttimeRange := \"WHERE kv.updated >= $2 AND kv.updated <= $3\"\n\tqry := getQuery() + \" AS kv \" + timeRange + \";\"\n\t// log.Println(qry)\n\trows, err := db.Client.Query(qry, path, nanoTimestampToRFC3339NoTimezone(from), nanoTimestampToRFC3339NoTimezone(to))\n\tif err != nil {\n\t\tlog.Println(\"failed get on sql\", err)\n\t\treturn res, err\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar entry Entry\n\t\terr = rows.Scan(&entry.Key, &entry.Created, &entry.Updated, &entry.Data)\n\t\tif err != nil {\n\t\t\tlog.Println(\"failed to parse sql entry\", path, err)\n\t\t\tcontinue\n\t\t}\n\t\tupdatedTime := int64(0)\n\t\tif entry.Updated.Valid {\n\t\t\tupdatedTime = entry.Updated.Time.UnixNano()\n\t\t}\n\n\t\tres = append(res, Object{\n\t\t\tCreated: entry.Created.UnixNano(),\n\t\t\tUpdated: updatedTime,\n\t\t\tKey: entry.Key,\n\t\t\tValue: entry.Data,\n\t\t})\n\t}\n\n\treturn res, nil\n}",
"func (p *ticketVotePlugin) cmdTimestamps(token []byte, payload string) (string, error) {\n\t// Decode payload\n\tvar t ticketvote.Timestamps\n\terr := json.Unmarshal([]byte(payload), &t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar (\n\t\tauths = make([]ticketvote.Timestamp, 0, 32)\n\t\tdetails *ticketvote.Timestamp\n\n\t\tpageSize = p.timestampsPageSize\n\t\tvotes = make([]ticketvote.Timestamp, 0, pageSize)\n\t)\n\tswitch {\n\tcase t.VotesPage > 0:\n\t\t// Return a page of vote timestamps\n\n\t\t// Look for final vote timestamps in the key-value cache\n\t\tcachedVotes, err := p.cachedVoteTimestamps(token, t.VotesPage, pageSize)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Get all cast vote digests from tstore\n\t\tdigests, err := p.tstore.DigestsByDataDesc(token,\n\t\t\t[]string{dataDescriptorCastVoteDetails})\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"digestsByKeyPrefix %x %v: %v\",\n\t\t\t\ttoken, dataDescriptorVoteDetails, err)\n\t\t}\n\n\t\tstartAt := (t.VotesPage - 1) * pageSize\n\t\tfor i, v := range digests {\n\t\t\tif i < int(startAt) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check if current digest timestamp already exists in cache\n\t\t\tvar foundInCache bool\n\t\t\tfor _, t := range cachedVotes {\n\t\t\t\tif t.Digest == hex.EncodeToString(v) {\n\t\t\t\t\t// Digest timestamp found, collect it\n\t\t\t\t\tvotes = append(votes, t)\n\t\t\t\t\tfoundInCache = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If digest was found in cache, continue to next digest\n\t\t\tif foundInCache {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Digest was not found in cache, get timestamp\n\t\t\tts, err := p.timestamp(token, v)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"timestamp %x %x: %v\",\n\t\t\t\t\ttoken, v, err)\n\t\t\t}\n\t\t\tvotes = append(votes, *ts)\n\n\t\t\tif len(votes) == int(pageSize) {\n\t\t\t\t// We have a full page. 
We're done.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Cache final vote timestamps\n\t\terr = p.cacheFinalVoteTimestamps(token, votes, t.VotesPage)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\tdefault:\n\t\t// Return authorization timestamps and the vote details\n\t\t// timestamp.\n\n\t\t// Auth timestamps\n\n\t\t// Look for final auth timestamps in the key-value cache\n\t\tcachedAuths, err := p.cachedAuthTimestamps(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Get all auth digests from tstore\n\t\tdigests, err := p.tstore.DigestsByDataDesc(token,\n\t\t\t[]string{dataDescriptorAuthDetails})\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"DigestByDataDesc %x %v: %v\",\n\t\t\t\ttoken, dataDescriptorAuthDetails, err)\n\t\t}\n\t\tauths = make([]ticketvote.Timestamp, 0, len(digests))\n\t\tfor _, v := range digests {\n\t\t\t// Check if current digest timestamp already exists in cache\n\t\t\tvar foundInCache bool\n\t\t\tfor _, t := range cachedAuths {\n\t\t\t\tif t.Digest == hex.EncodeToString(v) {\n\t\t\t\t\t// Digest timestamp found, collect it\n\t\t\t\t\tauths = append(auths, t)\n\t\t\t\t\tfoundInCache = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If digest was found in cache, continue to next digest\n\t\t\tif foundInCache {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Digest was not found in cache, get timestamp\n\t\t\tts, err := p.timestamp(token, v)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"timestamp %x %x: %v\",\n\t\t\t\t\ttoken, v, err)\n\t\t\t}\n\t\t\tauths = append(auths, *ts)\n\t\t}\n\n\t\t// Cache final auth timestamps\n\t\terr = p.cacheFinalAuthTimestamps(token, auths)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Vote details timestamp\n\n\t\t// Look for final vote details timestamp in the key-value cache\n\t\tcachedDetails, err := p.cachedDetailsTimestamp(token)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// Get vote details digests from tstore\n\t\tdigests, err = p.tstore.DigestsByDataDesc(token,\n\t\t\t[]string{dataDescriptorVoteDetails})\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"DigestsByDataDesc %x %v: %v\",\n\t\t\t\ttoken, dataDescriptorVoteDetails, err)\n\t\t}\n\t\t// There should never be more than a one vote details\n\t\tif len(digests) > 1 {\n\t\t\treturn \"\", fmt.Errorf(\"invalid vote details count: \"+\n\t\t\t\t\"got %v, want 1\", len(digests))\n\t\t}\n\t\tfor _, v := range digests {\n\t\t\t// Check if vote details digest timestamp already exists in cache\n\t\t\tswitch {\n\t\t\tcase cachedDetails != nil:\n\t\t\t\tif cachedDetails.Digest == hex.EncodeToString(v) {\n\t\t\t\t\t// Digest timestamp found, collect it\n\t\t\t\t\tdetails = cachedDetails\n\t\t\t\t}\n\n\t\t\tcase cachedDetails == nil:\n\t\t\t\t// Vote details timestamp was not found in cache, get timestamp\n\t\t\t\tts, err := p.timestamp(token, v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", fmt.Errorf(\"timestamp %x %x: %v\",\n\t\t\t\t\t\ttoken, v, err)\n\t\t\t\t}\n\t\t\t\tdetails = ts\n\t\t\t}\n\t\t}\n\n\t\t// Cache final vote details timestamp\n\t\tif details != nil {\n\t\t\terr = p.cacheFinalDetailsTimestamp(token, *details)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Prepare reply\n\ttr := ticketvote.TimestampsReply{\n\t\tAuths: auths,\n\t\tDetails: details,\n\t\tVotes: votes,\n\t}\n\treply, err := json.Marshal(tr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(reply), nil\n}",
"func (bs *blockSearch) getTimestamps() []int64 {\n\ttimestamps := bs.timestampsCache\n\tif timestamps != nil {\n\t\treturn timestamps.A\n\t}\n\n\tp := bs.bsw.p\n\n\tbb := longTermBufPool.Get()\n\tth := &bs.bsw.bh.timestampsHeader\n\tblockSize := th.blockSize\n\tif blockSize > maxTimestampsBlockSize {\n\t\tlogger.Panicf(\"FATAL: %s: timestamps block size cannot exceed %d bytes; got %d bytes\", bs.partPath(), maxTimestampsBlockSize, blockSize)\n\t}\n\tbb.B = bytesutil.ResizeNoCopyMayOverallocate(bb.B, int(blockSize))\n\tp.timestampsFile.MustReadAt(bb.B, int64(th.blockOffset))\n\n\trowsCount := int(bs.bsw.bh.rowsCount)\n\ttimestamps = encoding.GetInt64s(rowsCount)\n\tvar err error\n\ttimestamps.A, err = encoding.UnmarshalTimestamps(timestamps.A[:0], bb.B, th.marshalType, th.minTimestamp, rowsCount)\n\tlongTermBufPool.Put(bb)\n\tif err != nil {\n\t\tlogger.Panicf(\"FATAL: %s: cannot unmarshal timestamps: %s\", bs.partPath(), err)\n\t}\n\tbs.timestampsCache = timestamps\n\treturn timestamps.A\n}",
"func (g *GitInfo) Range(begin, end time.Time) []*vcsinfo.IndexCommit {\n\tg.mutex.Lock()\n\tdefer g.mutex.Unlock()\n\tret := []*vcsinfo.IndexCommit{}\n\tfirst := sort.Search(len(g.hashes), func(i int) bool {\n\t\tts := g.timestamps[g.hashes[i]]\n\t\treturn ts.After(begin) || ts.Equal(begin)\n\t})\n\tif first == len(g.timestamps) {\n\t\treturn ret\n\t}\n\tfor i, h := range g.hashes[first:] {\n\t\tif g.timestamps[h].Before(end) {\n\t\t\tret = append(ret, &vcsinfo.IndexCommit{\n\t\t\t\tHash: h,\n\t\t\t\tIndex: first + i,\n\t\t\t\tTimestamp: g.timestamps[h],\n\t\t\t})\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ret\n}",
"func queryRangeLog(\n\tconn *gosql.DB, query string, args ...interface{},\n) ([]kvserverpb.RangeLogEvent_Info, error) {\n\n\t// The range log can get large and sees unpredictable writes, so run this in a\n\t// proper txn to avoid spurious retries.\n\tvar events []kvserverpb.RangeLogEvent_Info\n\terr := crdb.ExecuteTx(context.Background(), conn, nil, func(conn *gosql.Tx) error {\n\t\tevents = nil // reset in case of a retry\n\n\t\trows, err := conn.Query(query, args...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer rows.Close()\n\t\tvar numEntries int\n\t\tfor rows.Next() {\n\t\t\tnumEntries++\n\t\t\tvar infoStr string\n\t\t\tif err := rows.Scan(&infoStr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar info kvserverpb.RangeLogEvent_Info\n\t\t\tif err := json.Unmarshal([]byte(infoStr), &info); err != nil {\n\t\t\t\treturn errors.Errorf(\"error unmarshaling info string %q: %s\", infoStr, err)\n\t\t\t}\n\t\t\tevents = append(events, info)\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\treturn events, err\n\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
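
The function in the row above compares zero-padded "YYYY-MM-DD HH:MM:SS" strings that it assembles with CONCAT/LPAD, so start and end are plain timestamp strings in that form. The short sketch below shows one way to call it; it assumes the sketch lives in the same package, that cfg is a previously loaded *Config, and that "caltechauthors" is a placeholder repository ID (none of these come from the row itself).

// listRecentModifications is a usage sketch only: cfg is assumed to be a
// previously loaded *Config and "caltechauthors" is a placeholder repository
// ID. Passing "datestamp" instead of "lastmod" would range over the created
// date rather than the last-modified date.
func listRecentModifications(cfg *Config) ([]int, error) {
	return GetEPrintIDsInTimestampRange(cfg, "caltechauthors", "lastmod",
		"2023-01-01 00:00:00", "2023-12-31 23:59:59")
}
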
GetEPrintIDsWithStatusInTimestampRange returns a list of EPrintIDs with the given eprint_status in a field timestamp range or returns an error. field may be either "datestamp" (for created date) or "lastmod" (for last modified date) | func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {
stmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE
(eprint_status = ?) AND
(CONCAT(%s_year, "-",
LPAD(IFNULL(%s_month, 1), 2, "0"), "-",
LPAD(IFNULL(%s_day, 1), 2, "0"), " ",
LPAD(IFNULL(%s_hour, 0), 2, "0"), ":",
LPAD(IFNULL(%s_minute, 0), 2, "0"), ":",
LPAD(IFNULL(%s_second, 0), 2, "0")) >= ?) AND
(CONCAT(%s_year, "-",
LPAD(IFNULL(%s_month, 12), 2, "0"), "-",
LPAD(IFNULL(%s_day, 28), 2, "0"), " ",
LPAD(IFNULL(%s_hour, 23), 2, "0"), ":",
LPAD(IFNULL(%s_minute, 59), 2, "0"), ":",
LPAD(IFNULL(%s_second, 59), 2, "0")) <= ?)
ORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,
field, field, field, field, field, field,
field, field, field, field, field, field,
field, field, field, field, field, field)
return sqlQueryIntIDs(config, repoID, stmt, status, start, end)
} | [
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func (s *InMemoryStore) InRange(uid string, ti, tf time.Time) ([]model.DrinkLog, error) {\n\tresult := []model.DrinkLog{}\n\n\tif userlogs, exists := s.drinklogs[uid]; exists {\n\t\t// add logs within time range to result\n\t\tfor _, log := range userlogs {\n\t\t\tif log.Time.Before(tf) && log.Time.After(ti) {\n\t\t\t\tresult = append(result, log)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func TimestampRange() *TimestampRangeFilter {\r\n\tf := new(TimestampRangeFilter)\r\n\r\n\tf.defaultLeftVal = 0\r\n\tf.defaultRightVal = math.MaxUint32\r\n\r\n\treturn f\r\n}",
"func (s *Service) ListStatus(envelopeIdsRequest *model.EnvelopeIdsRequest) *ListStatusOp {\n\treturn &ListStatusOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"PUT\",\n\t\tPath: \"envelopes/status\",\n\t\tPayload: envelopeIdsRequest,\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.VersionV21,\n\t}\n}",
"func queryRangeLog(\n\tconn *gosql.DB, query string, args ...interface{},\n) ([]kvserverpb.RangeLogEvent_Info, error) {\n\n\t// The range log can get large and sees unpredictable writes, so run this in a\n\t// proper txn to avoid spurious retries.\n\tvar events []kvserverpb.RangeLogEvent_Info\n\terr := crdb.ExecuteTx(context.Background(), conn, nil, func(conn *gosql.Tx) error {\n\t\tevents = nil // reset in case of a retry\n\n\t\trows, err := conn.Query(query, args...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer rows.Close()\n\t\tvar numEntries int\n\t\tfor rows.Next() {\n\t\t\tnumEntries++\n\t\t\tvar infoStr string\n\t\t\tif err := rows.Scan(&infoStr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar info kvserverpb.RangeLogEvent_Info\n\t\t\tif err := json.Unmarshal([]byte(infoStr), &info); err != nil {\n\t\t\t\treturn errors.Errorf(\"error unmarshaling info string %q: %s\", infoStr, err)\n\t\t\t}\n\t\t\tevents = append(events, info)\n\t\t}\n\t\tif err := rows.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n\treturn events, err\n\n}",
"func (s *adminServer) RangeLog(\n\tctx context.Context, req *serverpb.RangeLogRequest,\n) (_ *serverpb.RangeLogResponse, retErr error) {\n\t// All errors returned by this method must be serverErrors. We are careful\n\t// to not use serverError* methods in the body of the function, so we can\n\t// just do it here.\n\tdefer func() {\n\t\tif retErr != nil {\n\t\t\tretErr = s.serverError(retErr)\n\t\t}\n\t}()\n\n\tctx = s.server.AnnotateCtx(ctx)\n\n\t// Range keys, even when pretty-printed, contain PII.\n\tuserName, err := s.requireAdminUser(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlimit := req.Limit\n\tif limit == 0 {\n\t\tlimit = defaultAPIEventLimit\n\t}\n\n\tincludeRawKeys := debug.GatewayRemoteAllowed(ctx, s.server.ClusterSettings())\n\n\t// Execute the query.\n\tq := makeSQLQuery()\n\tq.Append(`SELECT timestamp, \"rangeID\", \"storeID\", \"eventType\", \"otherRangeID\", info `)\n\tq.Append(\"FROM system.rangelog \")\n\tif req.RangeId > 0 {\n\t\trangeID := tree.NewDInt(tree.DInt(req.RangeId))\n\t\tq.Append(`WHERE \"rangeID\" = $ OR \"otherRangeID\" = $`, rangeID, rangeID)\n\t}\n\tif limit > 0 {\n\t\tq.Append(\"ORDER BY timestamp desc \")\n\t\tq.Append(\"LIMIT $\", tree.NewDInt(tree.DInt(limit)))\n\t}\n\tif len(q.Errors()) > 0 {\n\t\treturn nil, combineAllErrors(q.Errors())\n\t}\n\tit, err := s.server.sqlServer.internalExecutor.QueryIteratorEx(\n\t\tctx, \"admin-range-log\", nil, /* txn */\n\t\tsessiondata.InternalExecutorOverride{User: userName},\n\t\tq.String(), q.QueryArguments()...,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// We have to make sure to close the iterator since we might return from the\n\t// for loop early (before Next() returns false).\n\tdefer func() { retErr = errors.CombineErrors(retErr, it.Close()) }()\n\n\t// Marshal response.\n\tvar resp serverpb.RangeLogResponse\n\tok, err := it.Next(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !ok {\n\t\t// The query returned 0 rows.\n\t\treturn &resp, nil\n\t}\n\tcols := it.Types()\n\tif len(cols) != 6 {\n\t\treturn nil, errors.Errorf(\"incorrect number of columns in response, expected 6, got %d\", len(cols))\n\t}\n\tscanner := makeResultScanner(cols)\n\tfor ; ok; ok, err = it.Next(ctx) {\n\t\trow := it.Cur()\n\t\tvar event kvserverpb.RangeLogEvent\n\t\tvar ts time.Time\n\t\tif err := scanner.ScanIndex(row, 0, &ts); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"timestamp didn't parse correctly: %s\", row[0].String())\n\t\t}\n\t\tevent.Timestamp = ts\n\t\tvar rangeID int64\n\t\tif err := scanner.ScanIndex(row, 1, &rangeID); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"RangeID didn't parse correctly: %s\", row[1].String())\n\t\t}\n\t\tevent.RangeID = roachpb.RangeID(rangeID)\n\t\tvar storeID int64\n\t\tif err := scanner.ScanIndex(row, 2, &storeID); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"StoreID didn't parse correctly: %s\", row[2].String())\n\t\t}\n\t\tevent.StoreID = roachpb.StoreID(int32(storeID))\n\t\tvar eventTypeString string\n\t\tif err := scanner.ScanIndex(row, 3, &eventTypeString); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"EventType didn't parse correctly: %s\", row[3].String())\n\t\t}\n\t\tif eventType, ok := kvserverpb.RangeLogEventType_value[eventTypeString]; ok {\n\t\t\tevent.EventType = kvserverpb.RangeLogEventType(eventType)\n\t\t} else {\n\t\t\treturn nil, errors.Errorf(\"EventType didn't parse correctly: %s\", eventTypeString)\n\t\t}\n\n\t\tvar otherRangeID int64\n\t\tif row[4].String() != \"NULL\" {\n\t\t\tif err := scanner.ScanIndex(row, 4, 
&otherRangeID); err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"OtherRangeID didn't parse correctly: %s\", row[4].String())\n\t\t\t}\n\t\t\tevent.OtherRangeID = roachpb.RangeID(otherRangeID)\n\t\t}\n\n\t\tvar prettyInfo serverpb.RangeLogResponse_PrettyInfo\n\t\tif row[5].String() != \"NULL\" {\n\t\t\tvar info string\n\t\t\tif err := scanner.ScanIndex(row, 5, &info); err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"info didn't parse correctly: %s\", row[5].String())\n\t\t\t}\n\t\t\tif err := json.Unmarshal([]byte(info), &event.Info); err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"info didn't parse correctly: %s\", info)\n\t\t\t}\n\t\t\tif event.Info.NewDesc != nil {\n\t\t\t\tif !includeRawKeys {\n\t\t\t\t\tevent.Info.NewDesc.StartKey = nil\n\t\t\t\t\tevent.Info.NewDesc.EndKey = nil\n\t\t\t\t}\n\t\t\t\tprettyInfo.NewDesc = event.Info.NewDesc.String()\n\t\t\t}\n\t\t\tif event.Info.UpdatedDesc != nil {\n\t\t\t\tif !includeRawKeys {\n\t\t\t\t\tevent.Info.UpdatedDesc.StartKey = nil\n\t\t\t\t\tevent.Info.UpdatedDesc.EndKey = nil\n\t\t\t\t}\n\t\t\t\tprettyInfo.UpdatedDesc = event.Info.UpdatedDesc.String()\n\t\t\t}\n\t\t\tif event.Info.AddedReplica != nil {\n\t\t\t\tprettyInfo.AddedReplica = event.Info.AddedReplica.String()\n\t\t\t}\n\t\t\tif event.Info.RemovedReplica != nil {\n\t\t\t\tprettyInfo.RemovedReplica = event.Info.RemovedReplica.String()\n\t\t\t}\n\t\t\tprettyInfo.Reason = string(event.Info.Reason)\n\t\t\tprettyInfo.Details = event.Info.Details\n\t\t}\n\n\t\tresp.Events = append(resp.Events, serverpb.RangeLogResponse_Event{\n\t\t\tEvent: event,\n\t\t\tPrettyInfo: prettyInfo,\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (a *Repo) TimeRange(ctx context.Context, bundleId int, start, end time.Time) (entities.DboSlice, error) {\n\treturn a.ProducerFunc(\n\t\tctx,\n\t\t\"select * from app_tracking where bundleid = $1 and startat >= $2 and startat <= $3\",\n\t\tbundleId, start, end,\n\t)\n}",
"func (VS *Server) getAuditsByServiceAndDate(c *gin.Context, from int64, to int64, IDservice bson.ObjectId, class string, method string, operation string) []Audit {\n\tvar aud []Audit\n\terr := VS.DataBase.Find(&aud, bolthold.Where(\"IDservice\").Eq(IDservice).And(\"Date\").Gt(from).And(\"Date\").Lt(to))\n\tVS.checkOperation(c, class, method, operation, err)\n\treturn aud\n}",
"func (a *AuditRecord) List(from, size int, start, end time.Time, status string) (*[]AuditRecord, error) {\n\to := orm.NewOrm()\n\tqs := o.QueryTable(a.TableName())\n\tif a.Namespace != \"\" {\n\t\tqs = qs.Filter(\"namespace\", a.Namespace)\n\t}\n\tif a.OperationType != 0 {\n\t\tqs = qs.Filter(\"operation_type\", a.OperationType)\n\t}\n\tif a.ResourceType != 0 {\n\t\tqs = qs.Filter(\"resource_type\", a.ResourceType)\n\t}\n\n\tif !start.IsZero() {\n\t\tqs = qs.Filter(\"time__gte\", start)\n\t}\n\tif !end.IsZero() {\n\t\tqs = qs.Filter(\"time__lte\", end)\n\t}\n\n\tif status == \"failed\" { // failed\n\t\tqs = qs.Exclude(\"status\", 0).Exclude(\"status\", 200)\n\t} else if status == \"running\" { // running\n\t\tqs = qs.Filter(\"status\", 0)\n\t} else if status == \"success\" { // success\n\t\tqs = qs.Filter(\"status\", 200)\n\t}\n\n\tvar records []AuditRecord\n\t_, err := qs.Offset(from).Limit(size).All(&records)\n\treturn &records, err\n}",
"func (r *Reader) GetServiceLevelMessagesInTimeRange(startTime, endTime int64, service, level string) (messages []*PlainMessage) {\n\tvar block *Block\n\tfor _, store := range r.Stores {\n\t\tblock = store.GetBlock(startTime, endTime, service, level)\n\t\tif block != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tplainMessageStack := block.toPlainMessageStack()\n\tplainMessageStack.Flip()\n\tfor !plainMessageStack.Empty() {\n\t\tmessages = append(messages, plainMessageStack.PopMessageContainer().(*PlainMessage))\n\t}\n\treturn\n}",
"func (VS *Server) getAuditsByUserAndDate(c *gin.Context, from int64, to int64, userID bson.ObjectId, class string, method string, operation string) []Audit {\n\tvar aud []Audit\n\terr := VS.DataBase.Find(&aud, bolthold.Where(\"IDuser\").Eq(userID).And(\"Date\").Gt(from).And(\"Date\").Lt(to))\n\tVS.checkOperation(c, class, method, operation, err)\n\treturn aud\n}",
"func GetRangeListsNotFound(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ListsController, listID string, fromBottom *int, fromTop *int, limit int) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\tif fromBottom != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromBottom)}\n\t\tquery[\"from_bottom\"] = sliceVal\n\t}\n\tif fromTop != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromTop)}\n\t\tquery[\"from_top\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{strconv.Itoa(limit)}\n\t\tquery[\"limit\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/lists/%v/range\", listID),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"list_id\"] = []string{fmt.Sprintf(\"%v\", listID)}\n\tif fromBottom != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromBottom)}\n\t\tprms[\"from_bottom\"] = sliceVal\n\t}\n\tif fromTop != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromTop)}\n\t\tprms[\"from_top\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{strconv.Itoa(limit)}\n\t\tprms[\"limit\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ListsTest\"), rw, req, prms)\n\tgetRangeCtx, _err := app.NewGetRangeListsContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.GetRange(getRangeCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 404 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 404\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetRangeListsUnauthorized(t goatest.TInterface, ctx context.Context, service *goa.Service, ctrl app.ListsController, listID string, fromBottom *int, fromTop *int, limit int) (http.ResponseWriter, error) {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tquery := url.Values{}\n\tif fromBottom != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromBottom)}\n\t\tquery[\"from_bottom\"] = sliceVal\n\t}\n\tif fromTop != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromTop)}\n\t\tquery[\"from_top\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{strconv.Itoa(limit)}\n\t\tquery[\"limit\"] = sliceVal\n\t}\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/lists/%v/range\", listID),\n\t\tRawQuery: query.Encode(),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"list_id\"] = []string{fmt.Sprintf(\"%v\", listID)}\n\tif fromBottom != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromBottom)}\n\t\tprms[\"from_bottom\"] = sliceVal\n\t}\n\tif fromTop != nil {\n\t\tsliceVal := []string{strconv.Itoa(*fromTop)}\n\t\tprms[\"from_top\"] = sliceVal\n\t}\n\t{\n\t\tsliceVal := []string{strconv.Itoa(limit)}\n\t\tprms[\"limit\"] = sliceVal\n\t}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"ListsTest\"), rw, req, prms)\n\tgetRangeCtx, _err := app.NewGetRangeListsContext(goaCtx, req, service)\n\tif _err != nil {\n\t\te, ok := _err.(goa.ServiceError)\n\t\tif !ok {\n\t\t\tpanic(\"invalid test data \" + _err.Error()) // bug\n\t\t}\n\t\treturn nil, e\n\t}\n\n\t// Perform action\n\t_err = ctrl.GetRange(getRangeCtx)\n\n\t// Validate response\n\tif _err != nil {\n\t\tt.Fatalf(\"controller returned %+v, logs:\\n%s\", _err, logBuf.String())\n\t}\n\tif rw.Code != 401 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 401\", rw.Code)\n\t}\n\tvar mt error\n\tif resp != nil {\n\t\tvar _ok bool\n\t\tmt, _ok = resp.(error)\n\t\tif !_ok {\n\t\t\tt.Fatalf(\"invalid response media: got variable of type %T, value %+v, expected instance of error\", resp, resp)\n\t\t}\n\t}\n\n\t// Return results\n\treturn rw, mt\n}",
"func (s StorageSQLite3) GetEventRowsWithinRange(eventRange int) []EventRow {\n\tdb := getDbConn(s.DbName)\n\tdefer db.Close()\n\n\tquery :=\n\t\t\"SELECT \" + DB_COLUMN_ID + \", \" + DB_COLUMN_EVENT + \" FROM \" + DB_TABLE_NAME + \" \" +\n\t\t\t\"ORDER BY \" + DB_COLUMN_ID + \" DESC LIMIT \" + IntToString(eventRange) + \";\"\n\treturn execGetQuery(db, query)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
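
The status-filtered variant above takes the same timestamp-string bounds plus an eprint_status value. The sketch below queries a whole calendar year; cfg, the repository ID, and the "archive" status value are placeholder assumptions, as is the choice of the "datestamp" field.

// archivedInYear is a usage sketch only: cfg is a loaded *Config,
// "caltechauthors" is a placeholder repository ID, and "archive" is assumed
// to be a valid eprint_status value in the target repository. Because the
// SQL compares zero-padded "YYYY-MM-DD HH:MM:SS" strings, simple year bounds
// cover the full year.
func archivedInYear(cfg *Config, year int) ([]int, error) {
	start := fmt.Sprintf("%d-01-01 00:00:00", year)
	end := fmt.Sprintf("%d-12-31 23:59:59", year)
	return GetEPrintIDsWithStatusInTimestampRange(cfg, "caltechauthors",
		"archive", "datestamp", start, end)
}
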
GetEPrintIDsWithStatus returns a list of eprints in a timestamp range for a given status or returns an error | func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {
stmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND
(CONCAT(lastmod_year, "-",
LPAD(IFNULL(lastmod_month, 1), 2, "0"), "-",
LPAD(IFNULL(lastmod_day, 1), 2, "0"), " ",
LPAD(IFNULL(lastmod_hour, 0), 2, "0"), ":",
LPAD(IFNULL(lastmod_minute, 0), 2, "0"), ":",
LPAD(IFNULL(lastmod_second, 0), 2, "0")) >= ?) AND
(CONCAT(lastmod_year, "-",
LPAD(IFNULL(lastmod_month, 12), 2, "0"), "-",
LPAD(IFNULL(lastmod_day, 28), 2, "0"), " ",
LPAD(IFNULL(lastmod_hour, 23), 2, "0"), ":",
LPAD(IFNULL(lastmod_minute, 59), 2, "0"), ":",
LPAD(IFNULL(lastmod_second, 59), 2, "0")) <= ?)
ORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,
 lastmod_hour DESC, lastmod_minute DESC, lastmod_second DESC`
return sqlQueryIntIDs(config, repoID, stmt, status, start, end)
} | [
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func (ctrl *Controller) Status(s EStatus) []*Elevator {\n\tvar elevators []*Elevator\n\tfor _, el := range ctrl.Elevators {\n\t\tif el.Status == s {\n\t\t\televators = append(elevators, el)\n\t\t}\n\t}\n\treturn elevators\n}",
"func (s *Service) ListStatus(envelopeIdsRequest *model.EnvelopeIdsRequest) *ListStatusOp {\n\treturn &ListStatusOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"PUT\",\n\t\tPath: \"envelopes/status\",\n\t\tPayload: envelopeIdsRequest,\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.VersionV21,\n\t}\n}",
"func ListStatusEgroups(r *http.Request, cfg config.Config) (int, http.Header, []byte, error) {\n\n\t//STANDARD DECLARATIONS START\n\n\tcode := http.StatusOK\n\th := http.Header{}\n\toutput := []byte(\"List Flapping Metrics\")\n\terr := error(nil)\n\tcharset := \"utf-8\"\n\n\t//STANDARD DECLARATIONS END\n\n\t// Set Content-Type response Header value\n\tcontentType := r.Header.Get(\"Accept\")\n\th.Set(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Parse the request into the input\n\turlValues := r.URL.Query()\n\tvars := mux.Vars(r)\n\n\t// Grab Tenant DB configuration from context\n\ttenantDbConfig := context.Get(r, \"tenant_conf\").(config.MongoConfig)\n\n\tsession, err := mongo.OpenSession(tenantDbConfig)\n\tdefer mongo.CloseSession(session)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tegroupCollection := session.DB(tenantDbConfig.Db).C(statusEgroups)\n\n\t// Query the detailed status services trend results\n\treportID, err := mongo.GetReportID(session, tenantDbConfig.Db, vars[\"report_name\"])\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tstartDate, endDate, err := getDateRange(urlValues)\n\tif err != nil {\n\t\tcode = http.StatusBadRequest\n\t\toutput, _ = respond.MarshalContent(respond.ErrBadRequestDetails(err.Error()), contentType, \"\", \" \")\n\t\treturn code, h, output, err\n\t}\n\n\tlimit := -1\n\tlimStr := urlValues.Get(\"top\")\n\tif limStr != \"\" {\n\t\tlimit, err = strconv.Atoi(limStr)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\t}\n\n\tgranularity := urlValues.Get(\"granularity\")\n\n\t// query for endpoint groups\n\tfilter := bson.M{\"report\": reportID, \"date\": bson.M{\"$gte\": startDate, \"$lte\": endDate}}\n\n\t// apply query for bucketed monthly results if granularity is set to monthly\n\tif granularity == \"monthly\" {\n\n\t\tresults := []StatusMonthEgroupData{}\n\n\t\tquery := []bson.M{\n\t\t\t{\"$match\": filter},\n\t\t\t{\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\n\t\t\t\t\t\"month\": bson.M{\"$substr\": list{\"$date\", 0, 6}},\n\t\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\t\"status\": \"$status\"},\n\t\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t\t}},\n\t\t\t{\"$sort\": bson.D{{\"_id.month\", 1}, {\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t\t{\n\t\t\t\t\"$group\": bson.M{\n\t\t\t\t\t\"_id\": bson.M{\"month\": \"$_id.month\", \"status\": \"$_id.status\"},\n\t\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t\t}\n\n\t\t// trim down the list in each month-bucket according to the limit parameter\n\t\tif limit > 0 {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t\t} else {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": \"$top\"}})\n\t\t}\n\n\t\t// sort end results by month bucket ascending\n\t\tquery = append(query, bson.M{\"$sort\": bson.D{{\"date\", 1}, {\"status\", 
1}}})\n\n\t\terr = egroupCollection.Pipe(query).All(&results)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\n\t\toutput, err = createStatusMonthEgroupListView(results, \"Success\", 200)\n\n\t\treturn code, h, output, err\n\n\t}\n\n\t// continue by calculating non monthly bucketed results\n\tresults := []StatusGroupEgroupData{}\n\n\tquery := []bson.M{\n\t\t{\"$match\": filter},\n\t\t{\"$group\": bson.M{\n\t\t\t\"_id\": bson.M{\n\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\"status\": \"$status\"},\n\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t}},\n\t\t{\"$sort\": bson.D{{\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t{\n\t\t\t\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\"status\": \"$_id.status\"},\n\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t}\n\n\t// trim down the list in each month-bucket according to the limit parameter\n\tif limit > 0 {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t} else {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": \"$top\"}})\n\t}\n\n\t// sort end results by month bucket ascending\n\tquery = append(query, bson.M{\"$sort\": bson.D{{\"status\", 1}}})\n\n\terr = egroupCollection.Pipe(query).All(&results)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\toutput, err = createStatusEgroupListView(results, \"Success\", 200)\n\n\treturn code, h, output, err\n}",
"func (hdl *DBHandler) GetStatus(info *Status) *[]Status {\n\tret := make([]Status, 0)\n\tquery := hdl.sqlDB\n\tif info.QuestionID > 0 {\n\t\tfmt.Println(\"NOT ZERO\")\n\t\tquery = query.Where(\"question_id=?\", info.QuestionID)\n\t}\n\tif info.UserID > 0 {\n\t\tquery = query.Where(\"user_id=?\", info.UserID)\n\t}\n\tquery.Order(\"ID DESC\").Find(&ret)\n\treturn &ret\n}",
"func (m *MockRoomStorage) GetRoomIDsByStatus(ctx context.Context, scheduler string, status game_room.GameRoomStatus) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRoomIDsByStatus\", ctx, scheduler, status)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func GetTestsByStatus(statusList []string) ([]byte, error) {\n\tvar b []byte\n\tvar where string\n\tif len(statusList) == 0 {\n\t\tfmt.Println(\"no status provided in list\")\n\t\treturn b, fmt.Errorf(\"need to provide a status\")\n\t}\n\n\tfor i := range statusList {\n\t\tif i == 0 {\n\t\t\twhere = \"WHERE (t.status_id = (SELECT id from portal.test_status WHERE status=$1)\"\n\t\t\tcontinue\n\t\t}\n\t\twhere = where + fmt.Sprintf(\"\\nOR t.status_id = (SELECT id from portal.test_status WHERE status=$%v)\", i+1)\n\t}\n\twhere = where + \")\"\n\n\tquery := `SELECT t.id, t.name, s.status, r.result, array_agg(l.status), t.description, t.created, t.launched, t.stopped \n\tFROM portal.test t \n\tINNER JOIN portal.test_status s \n\tON t.status_id = s.id\n\tLEFT JOIN portal.test_results r\n\tON t.result_id = r.id\n\tLEFT JOIN portal.tests_labels tl\n\tON t.id = tl.test_id\n\tLEFT JOIN portal.labels l\n\tON l.id = tl.label_id\n\t` + where + `\n\tGROUP BY t.id, t.name, s.status, r.result, t.description, t.created, t.launched, t.stopped \n\tORDER BY t.created DESC\n\t`\n\n\ts := make([]interface{}, len(statusList))\n\tfor i, v := range statusList {\n\t\ts[i] = v\n\t}\n\n\trows, err := db.Query(query, s...)\n\tif err != nil {\n\t\tfmt.Println(\"received err:\", err)\n\t\treturn b, err\n\t}\n\tdefer rows.Close()\n\n\tvar tests []Test\n\tfor rows.Next() {\n\t\tvar id, name, status, result, desc, created, launched, stopped string\n\t\tvar sqlCreated, sqlLaunched, sqlStopped pq.NullTime\n\t\tvar labels []sql.NullString\n\t\tvar sqlResult sql.NullString\n\t\tif err := rows.Scan(&id, &name, &status, &sqlResult, pq.Array(&labels), &desc, &sqlCreated, &sqlLaunched, &sqlStopped); err != nil {\n\t\t\tfmt.Println(\"error scanning:\", err)\n\t\t\treturn b, err\n\t\t}\n\n\t\ttimeFormat := \"2006-01-02T15:04:05Z07:00\"\n\n\t\tif sqlResult.Valid {\n\t\t\tresult = sqlResult.String\n\t\t} else {\n\t\t\tresult = \"-\"\n\t\t}\n\n\t\tif sqlCreated.Valid {\n\t\t\tcreated = sqlCreated.Time.Format(timeFormat)\n\t\t} else {\n\t\t\tcreated = \"-\"\n\t\t}\n\n\t\tif sqlLaunched.Valid {\n\t\t\tlaunched = sqlLaunched.Time.Format(timeFormat)\n\t\t} else {\n\t\t\tlaunched = \"-\"\n\t\t}\n\n\t\tif sqlStopped.Valid {\n\t\t\tstopped = sqlStopped.Time.Format(timeFormat)\n\t\t} else {\n\t\t\tstopped = \"-\"\n\t\t}\n\n\t\ttests = append(tests, Test{id, name, desc, status, nullStringToStringSlice(labels), result, created, launched, stopped, \"\"})\n\t}\n\n\tif len(tests) == 0 {\n\t\treturn []byte(\"[]\"), nil\n\t}\n\n\tb, err = json.Marshal(tests)\n\tif err != nil {\n\t\tfmt.Println(\"marshal err:\", err)\n\t}\n\n\treturn b, err\n}",
"func (mr *MockRoomStorageMockRecorder) GetRoomIDsByStatus(ctx, scheduler, status interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetRoomIDsByStatus\", reflect.TypeOf((*MockRoomStorage)(nil).GetRoomIDsByStatus), ctx, scheduler, status)\n}",
"func (es *EmployeeService) Status(ctx context.Context, id int) (*EmployeeStatusResult, error) {\n\te := &EmployeeStatusResult{}\n\tvals := url.Values{}\n\tvals.Set(\"employeeId\", strconv.Itoa(id))\n\n\tif err := es.client.Request(ctx, http.MethodGet, employeeEndpoint.Action(status).Query(vals), nil, e); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e, nil\n}",
"func (u *Users) Status(ctx context.Context, st Status) ([]keys.ID, error) {\n\titer, err := u.ds.DocumentIterator(context.TODO(), indexKID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkids := make([]keys.ID, 0, 100)\n\tfor {\n\t\tdoc, err := iter.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif doc == nil {\n\t\t\tbreak\n\t\t}\n\t\tvar keyDoc keyDocument\n\t\tif err := json.Unmarshal(doc.Data, &keyDoc); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif keyDoc.Result != nil {\n\t\t\tif keyDoc.Result.Status == st {\n\t\t\t\tkids = append(kids, keyDoc.Result.User.KID)\n\t\t\t}\n\t\t}\n\t}\n\titer.Release()\n\n\treturn kids, nil\n}",
"func StatusGTE(v int8) predicate.Post {\n\treturn predicate.Post(sql.FieldGTE(FieldStatus, v))\n}",
"func FormatStatus(status *idl.SubstepStatus) string {\n\tline, ok := lines[status.Step]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"unexpected step %#v\", status.Step))\n\t}\n\n\treturn Format(line, status.Status)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func (r *JobRepository) GetByStatusAndBefore(ctxIn context.Context, status []repository.JobStatus, deltaHours int) ([]*repository.Job, error) {\n _, span := trace.StartSpan(ctxIn, \"(*JobRepository).GetByStatusAndBefore\")\n defer span.End()\n\n panic(\"implement me\")\n}",
"func (s *NsStatus) GetStatusList(alarmVersion, host, level string) []Status {\n\toutput := make([]Status, 0)\n\tStatusMu.RLock()\n\tdefer StatusMu.RUnlock()\n\tfor _, alarmStatus := range *s {\n\t\tfor _alarmVersion, hostStatus := range alarmStatus {\n\t\t\tif alarmVersion != \"\" && alarmVersion != string(_alarmVersion) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _host, hostStatus := range hostStatus {\n\t\t\t\tif host != \"\" && host != string(_host) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, tagStatus := range hostStatus {\n\t\t\t\t\tif tagStatus.Level == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ttagStatus.LastTime = ((time.Since(tagStatus.CreateTime)) / time.Second)\n\t\t\t\t\tif level == \"\" || tagStatus.Level == level {\n\t\t\t\t\t\toutput = append(output, tagStatus)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
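
GetEPrintIDsWithStatus above is effectively the lastmod-only special case of GetEPrintIDsWithStatusInTimestampRange from the previous row. The sketch below is a usage illustration under the same placeholder assumptions (cfg, repository ID, and status value); the commented line shows the equivalent call through the general helper.

// recentlyTouched is a usage sketch only; cfg, "caltechauthors", and
// "archive" are placeholders carried over from the earlier sketches.
func recentlyTouched(cfg *Config, start, end string) ([]int, error) {
	// Equivalent general form:
	// GetEPrintIDsWithStatusInTimestampRange(cfg, "caltechauthors", "archive", "lastmod", start, end)
	return GetEPrintIDsWithStatus(cfg, "caltechauthors", "archive", start, end)
}
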
GetEPrintIDsForDateType returns a list of eprints in a date range or returns an error | func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {
	stmt := `SELECT eprintid FROM eprint
WHERE ((date_type) = ?) AND
(CONCAT(date_year, "-",
LPAD(IFNULL(date_month, 1), 2, "0"), "-",
LPAD(IFNULL(date_day, 1), 2, "0")) >= ?) AND
(CONCAT(date_year, "-",
LPAD(IFNULL(date_month, 12), 2, "0"), "-",
LPAD(IFNULL(date_day, 28), 2, "0")) <= ?)
ORDER BY date_year DESC, date_month DESC, date_day DESC
`
return sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)
} | [
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func (c *Client) FindIrQwebFieldDateIds(criteria *Criteria, options *Options) ([]int64, error) {\n\tids, err := c.Search(IrQwebFieldDateModel, criteria, options)\n\tif err != nil {\n\t\treturn []int64{}, err\n\t}\n\treturn ids, nil\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func (VS *Server) getAuditsByDates(c *gin.Context, from int64, to int64, class string, method string, operation string) []Audit {\n\tvar aud []Audit\n\terr := VS.DataBase.Find(&aud, bolthold.Where(\"Date\").Gt(from).And(\"Date\").Lt(to))\n\tVS.checkOperation(c, class, method, operation, err)\n\treturn aud\n}",
"func GetDayRange(start string, end string, format string) ([]string, error) {\n\tret := []string{}\n\tvar err error\n\n\tstartTime, err := time.Parse(format, start)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tendTime, err := time.Parse(format, end)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif startTime.Unix() > endTime.Unix() {\n\t\treturn ret, errors.New(\"start bigger than end\")\n\t}\n\n\tfor !startTime.Equal(endTime) {\n\t\ttimeStr := startTime.Format(format)\n\t\tret = append(ret, timeStr)\n\t\tstartTime = startTime.Add(24 * time.Hour)\n\t}\n\n\treturn ret, nil\n}",
"func dateRange(params ...interface{}) bool {\r\n\tr, err := subDateRange(time.Now(), params...)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\treturn r\r\n}",
"func ImportEPrints(config *Config, repoID string, ds *DataSource, eprints *EPrints) ([]int, error) {\n\tvar importErrors error\n\tids := []int{}\n\n\tif config.Connections == nil {\n\t\treturn nil, fmt.Errorf(`no databases are not configured`)\n\t}\n\t_, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(`%s database connection not configured`, repoID)\n\t}\n\n\t// Check to make sure updates are allowed if non-Zero\n\t// eprint ids present.\n\tfor _, eprint := range eprints.EPrint {\n\t\tif eprint.EPrintID != 0 {\n\t\t\treturn nil, fmt.Errorf(\"create failed eprint id %d in %s\", eprint.EPrintID, repoID)\n\t\t}\n\t\tif eprint.Collection == \"\" && ds.DefaultCollection != \"\" {\n\t\t\teprint.Collection = DefaultCollection\n\t\t}\n\t\tif eprint.IDNumber == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.IDNumber = GenerateIDNumber(eprint)\n\t\t}\n\t\tif eprint.OfficialURL == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.OfficialURL = GenerateOfficialURL(eprint)\n\t\t}\n\t\tif eprint.Rights == \"\" && ds.DefaultRights != \"\" {\n\t\t\teprint.Rights = ds.DefaultRights\n\t\t}\n\t\tif eprint.Refereed == \"\" && eprint.Type == \"article\" &&\n\t\t\tds.DefaultRefereed != \"\" {\n\t\t\teprint.Refereed = ds.DefaultRefereed\n\t\t}\n\t\tif eprint.EPrintStatus == \"\" && ds.DefaultStatus != \"\" {\n\t\t\teprint.EPrintStatus = ds.DefaultStatus\n\t\t}\n\t\tif eprint.Abstract != \"\" && ds.StripTags {\n\t\t\tif cleaner.HasEncodedElements([]byte(eprint.Abstract)) {\n\t\t\t\teprint.Abstract = string(cleaner.StripTags([]byte(eprint.Abstract)))\n\t\t\t}\n\t\t}\n\t}\n\tfor _, eprint := range eprints.EPrint {\n\t\tid, err := SQLCreateEPrint(config, repoID, ds, eprint)\n\t\tif err != nil {\n\t\t\tif importErrors == nil {\n\t\t\t\timportErrors = err\n\t\t\t} else {\n\t\t\t\timportErrors = fmt.Errorf(\"%s; %s\", importErrors, err)\n\t\t\t}\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\treturn ids, importErrors\n}",
"func (VS *Server) getAuditsByServiceAndDate(c *gin.Context, from int64, to int64, IDservice bson.ObjectId, class string, method string, operation string) []Audit {\n\tvar aud []Audit\n\terr := VS.DataBase.Find(&aud, bolthold.Where(\"IDservice\").Eq(IDservice).And(\"Date\").Gt(from).And(\"Date\").Lt(to))\n\tVS.checkOperation(c, class, method, operation, err)\n\treturn aud\n}",
"func getDateRange(dateFrom, dateTo string, daysAfter int) []string {\n\n\tregExpStr := []string{}\n\t//'20160630'\n\tyy, mm, dd := convertToDateParts(dateFrom)\n\tdtFrom := time.Date(yy, time.Month(mm), dd, 0, 0, 0, 0, time.UTC)\n\tif verbose {\n\t\tlog.Printf(\"Provided From: string:%s, parsed into: %d, %d, %d, converted into %v\\n\",\n\t\t\tdateFrom, yy, mm, dd, dtFrom.String())\n\t}\n\n\tyy, mm, dd = convertToDateParts(dateTo)\n\tdtTo := time.Date(yy, time.Month(mm), dd, 0, 0, 0, 0, time.UTC)\n\n\tif verbose {\n\t\tlog.Printf(\"Provided To: string:%s, parsed into: %d, %d, %d, converted into %v\\n\",\n\t\t\tdateTo, yy, mm, dd, dtTo.String())\n\t}\n\n\tif dtFrom.After(dtTo) { // || dtFrom.Equal(dtTo) {\n\t\tlog.Printf(\"Date from %v is greater or equal than date to: %v\\n\", dtFrom, dtTo)\n\t\tlog.Println(\"Nothing to do\")\n\t\tos.Exit(-1)\n\t}\n\n\tdtFrom = dtFrom.AddDate(0, 0, -1)\n\tdtTo = dtTo.AddDate(0, 0, daysAfter)\n\n\tif verbose {\n\t\tlog.Println(\"Working From:\", dtFrom.String())\n\t\tlog.Println(\"Working To:\", dtTo.String())\n\t}\n\n\tdt := dtFrom\n\tfor {\n\t\tregExpStr = append(regExpStr, dt.Format(\"20060102\"))\n\t\tif verbose {\n\t\t\tlog.Printf(\"Appending for %s = %s\\n\", dt.String(), dt.Format(\"20060102\"))\n\t\t}\n\n\t\tdt = dt.AddDate(0, 0, 1)\n\t\tif dt.After(dtTo) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn regExpStr\n}",
"func GetTimecardEmployees(ctx *fiber.Ctx) error {\n\tqueryvals := TimecardEmployeeFilters{}\n\tif err := ctx.QueryParser(&queryvals); err != nil {\n\t\treturn common.BadRequestError(ctx, \"Invalid query parameters\")\n\t}\n\tif err := queryvals.Validate(); err != nil {\n\t\treturn common.BadRequestError(ctx, \"Bad query params\")\n\t}\n\n\ttimecards := getTimecards(TimecardFilters{\n\t\tStartDate: queryvals.StartDate,\n\t\tEndDate: queryvals.EndDate,\n\t})\n\n\ttimecardIDs := []string{}\n\tfor _, tc := range timecards {\n\t\ttimecardIDs = append(timecardIDs, tc.ID)\n\t}\n\n\tdb := common.GetDB()\n\temployees := []TimecardEmployee{}\n\tif err := db.Preload(\"Hours\").Where(\"timecard_id IN ?\", timecardIDs).Find(&employees).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.JSON(employees)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsWithStatusForDateType returns list of eprints in date range for a given status or returns an error | func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {
stmt := fmt.Sprintf(`SELECT eprintid FROM eprint
WHERE (eprint_status = ? ) AND (date_type = ?) AND
(CONCAT(date_year, "-",
LPAD(IFNULL(date_month, 1), 2, "0"), "-",
LPAD(IFNULL(date_day, 1), 2, "0")) >= ?) AND
(CONCAT(date_year, "-",
LPAD(IFNULL(date_month, 12), 2, "0"), "-",
LPAD(IFNULL(date_day, 28), 2, "0")) <= ?)
ORDER BY date_year DESC, date_month DESC, date_day DESC
`)
return sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)
} | [
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func ListStatusEgroups(r *http.Request, cfg config.Config) (int, http.Header, []byte, error) {\n\n\t//STANDARD DECLARATIONS START\n\n\tcode := http.StatusOK\n\th := http.Header{}\n\toutput := []byte(\"List Flapping Metrics\")\n\terr := error(nil)\n\tcharset := \"utf-8\"\n\n\t//STANDARD DECLARATIONS END\n\n\t// Set Content-Type response Header value\n\tcontentType := r.Header.Get(\"Accept\")\n\th.Set(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Parse the request into the input\n\turlValues := r.URL.Query()\n\tvars := mux.Vars(r)\n\n\t// Grab Tenant DB configuration from context\n\ttenantDbConfig := context.Get(r, \"tenant_conf\").(config.MongoConfig)\n\n\tsession, err := mongo.OpenSession(tenantDbConfig)\n\tdefer mongo.CloseSession(session)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tegroupCollection := session.DB(tenantDbConfig.Db).C(statusEgroups)\n\n\t// Query the detailed status services trend results\n\treportID, err := mongo.GetReportID(session, tenantDbConfig.Db, vars[\"report_name\"])\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tstartDate, endDate, err := getDateRange(urlValues)\n\tif err != nil {\n\t\tcode = http.StatusBadRequest\n\t\toutput, _ = respond.MarshalContent(respond.ErrBadRequestDetails(err.Error()), contentType, \"\", \" \")\n\t\treturn code, h, output, err\n\t}\n\n\tlimit := -1\n\tlimStr := urlValues.Get(\"top\")\n\tif limStr != \"\" {\n\t\tlimit, err = strconv.Atoi(limStr)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\t}\n\n\tgranularity := urlValues.Get(\"granularity\")\n\n\t// query for endpoint groups\n\tfilter := bson.M{\"report\": reportID, \"date\": bson.M{\"$gte\": startDate, \"$lte\": endDate}}\n\n\t// apply query for bucketed monthly results if granularity is set to monthly\n\tif granularity == \"monthly\" {\n\n\t\tresults := []StatusMonthEgroupData{}\n\n\t\tquery := []bson.M{\n\t\t\t{\"$match\": filter},\n\t\t\t{\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\n\t\t\t\t\t\"month\": bson.M{\"$substr\": list{\"$date\", 0, 6}},\n\t\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\t\"status\": \"$status\"},\n\t\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t\t}},\n\t\t\t{\"$sort\": bson.D{{\"_id.month\", 1}, {\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t\t{\n\t\t\t\t\"$group\": bson.M{\n\t\t\t\t\t\"_id\": bson.M{\"month\": \"$_id.month\", \"status\": \"$_id.status\"},\n\t\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t\t}\n\n\t\t// trim down the list in each month-bucket according to the limit parameter\n\t\tif limit > 0 {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t\t} else {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": \"$top\"}})\n\t\t}\n\n\t\t// sort end results by month bucket ascending\n\t\tquery = append(query, bson.M{\"$sort\": bson.D{{\"date\", 1}, {\"status\", 
1}}})\n\n\t\terr = egroupCollection.Pipe(query).All(&results)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\n\t\toutput, err = createStatusMonthEgroupListView(results, \"Success\", 200)\n\n\t\treturn code, h, output, err\n\n\t}\n\n\t// continue by calculating non monthly bucketed results\n\tresults := []StatusGroupEgroupData{}\n\n\tquery := []bson.M{\n\t\t{\"$match\": filter},\n\t\t{\"$group\": bson.M{\n\t\t\t\"_id\": bson.M{\n\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\"status\": \"$status\"},\n\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t}},\n\t\t{\"$sort\": bson.D{{\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t{\n\t\t\t\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\"status\": \"$_id.status\"},\n\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t}\n\n\t// trim down the list in each month-bucket according to the limit parameter\n\tif limit > 0 {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t} else {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": \"$top\"}})\n\t}\n\n\t// sort end results by month bucket ascending\n\tquery = append(query, bson.M{\"$sort\": bson.D{{\"status\", 1}}})\n\n\terr = egroupCollection.Pipe(query).All(&results)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\toutput, err = createStatusEgroupListView(results, \"Success\", 200)\n\n\treturn code, h, output, err\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func (r *JobRepository) GetByJobTypeAndStatus(ctxIn context.Context, backupType repository.BackupType, jobStatus ...repository.JobStatus) (jobs []*repository.Job, err error) {\n _, span := trace.StartSpan(ctxIn, \"(*JobRepository).GetByJobTypeAndStatus\")\n defer span.End()\n\n for _, status := range jobStatus {\n for _, j := range r.jobs {\n if j.Type == backupType && j.Status == status {\n jobs = append(jobs, j)\n }\n }\n }\n return jobs, err\n}",
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func (ctrl *Controller) Status(s EStatus) []*Elevator {\n\tvar elevators []*Elevator\n\tfor _, el := range ctrl.Elevators {\n\t\tif el.Status == s {\n\t\t\televators = append(elevators, el)\n\t\t}\n\t}\n\treturn elevators\n}",
"func (VS *Server) getAuditsByServiceAndDate(c *gin.Context, from int64, to int64, IDservice bson.ObjectId, class string, method string, operation string) []Audit {\n\tvar aud []Audit\n\terr := VS.DataBase.Find(&aud, bolthold.Where(\"IDservice\").Eq(IDservice).And(\"Date\").Gt(from).And(\"Date\").Lt(to))\n\tVS.checkOperation(c, class, method, operation, err)\n\treturn aud\n}",
"func ListStatusServices(r *http.Request, cfg config.Config) (int, http.Header, []byte, error) {\n\n\t//STANDARD DECLARATIONS START\n\n\tcode := http.StatusOK\n\th := http.Header{}\n\toutput := []byte(\"List Flapping Metrics\")\n\terr := error(nil)\n\tcharset := \"utf-8\"\n\n\t//STANDARD DECLARATIONS END\n\n\t// Set Content-Type response Header value\n\tcontentType := r.Header.Get(\"Accept\")\n\th.Set(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Parse the request into the input\n\turlValues := r.URL.Query()\n\tvars := mux.Vars(r)\n\n\t// Grab Tenant DB configuration from context\n\ttenantDbConfig := context.Get(r, \"tenant_conf\").(config.MongoConfig)\n\n\tsession, err := mongo.OpenSession(tenantDbConfig)\n\tdefer mongo.CloseSession(session)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tserviceCollection := session.DB(tenantDbConfig.Db).C(statusServices)\n\n\t// Query the detailed status services trend results\n\treportID, err := mongo.GetReportID(session, tenantDbConfig.Db, vars[\"report_name\"])\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tstartDate, endDate, err := getDateRange(urlValues)\n\tif err != nil {\n\t\tcode = http.StatusBadRequest\n\t\toutput, _ = respond.MarshalContent(respond.ErrBadRequestDetails(err.Error()), contentType, \"\", \" \")\n\t\treturn code, h, output, err\n\t}\n\n\tlimit := -1\n\tlimStr := urlValues.Get(\"top\")\n\tif limStr != \"\" {\n\t\tlimit, err = strconv.Atoi(limStr)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\t}\n\n\tgranularity := urlValues.Get(\"granularity\")\n\n\t// query for services\n\tfilter := bson.M{\"report\": reportID, \"date\": bson.M{\"$gte\": startDate, \"$lte\": endDate}}\n\n\t// apply query for bucketed monthly results if granularity is set to monthly\n\tif granularity == \"monthly\" {\n\n\t\tresults := []StatusMonthServiceData{}\n\n\t\tquery := []bson.M{\n\t\t\t{\"$match\": filter},\n\t\t\t{\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\n\t\t\t\t\t\"month\": bson.M{\"$substr\": list{\"$date\", 0, 6}},\n\t\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\t\"service\": \"$service\",\n\t\t\t\t\t\"status\": \"$status\"},\n\t\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t\t}},\n\t\t\t{\"$sort\": bson.D{{\"_id.month\", 1}, {\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t\t{\n\t\t\t\t\"$group\": bson.M{\n\t\t\t\t\t\"_id\": bson.M{\"month\": \"$_id.month\", \"status\": \"$_id.status\"},\n\t\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"service\": \"$_id.service\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t\t}\n\n\t\t// trim down the list in each month-bucket according to the limit parameter\n\t\tif limit > 0 {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t\t} else {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": \"$top\"}})\n\t\t}\n\n\t\t// sort end results by month bucket ascending\n\t\tquery = append(query, 
bson.M{\"$sort\": bson.D{{\"date\", 1}, {\"status\", 1}}})\n\n\t\terr = serviceCollection.Pipe(query).All(&results)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\n\t\toutput, err = createStatusMonthServiceListView(results, \"Success\", 200)\n\n\t\treturn code, h, output, err\n\n\t}\n\n\t// continue by calculating non monthly bucketed results\n\tresults := []StatusGroupServiceData{}\n\n\tquery := []bson.M{\n\t\t{\"$match\": filter},\n\t\t{\"$group\": bson.M{\n\t\t\t\"_id\": bson.M{\n\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\"service\": \"$service\",\n\t\t\t\t\"status\": \"$status\"},\n\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t}},\n\t\t{\"$sort\": bson.D{{\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t{\n\t\t\t\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\"status\": \"$_id.status\"},\n\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"service\": \"$_id.service\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t}\n\n\t// trim down the list in each month-bucket according to the limit parameter\n\tif limit > 0 {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t} else {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": \"$top\"}})\n\t}\n\n\t// sort end results by month bucket ascending\n\tquery = append(query, bson.M{\"$sort\": bson.D{{\"status\", 1}}})\n\n\terr = serviceCollection.Pipe(query).All(&results)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\toutput, err = createStatusServiceListView(results, \"Success\", 200)\n\n\treturn code, h, output, err\n}",
"func (s *Service) ListStatus(envelopeIdsRequest *model.EnvelopeIdsRequest) *ListStatusOp {\n\treturn &ListStatusOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"PUT\",\n\t\tPath: \"envelopes/status\",\n\t\tPayload: envelopeIdsRequest,\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.VersionV21,\n\t}\n}",
"func ListStatusEndpoints(r *http.Request, cfg config.Config) (int, http.Header, []byte, error) {\n\n\t//STANDARD DECLARATIONS START\n\n\tcode := http.StatusOK\n\th := http.Header{}\n\toutput := []byte(\"List Flapping Metrics\")\n\terr := error(nil)\n\tcharset := \"utf-8\"\n\n\t//STANDARD DECLARATIONS END\n\n\t// Set Content-Type response Header value\n\tcontentType := r.Header.Get(\"Accept\")\n\th.Set(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Parse the request into the input\n\turlValues := r.URL.Query()\n\tvars := mux.Vars(r)\n\n\t// Grab Tenant DB configuration from context\n\ttenantDbConfig := context.Get(r, \"tenant_conf\").(config.MongoConfig)\n\n\tsession, err := mongo.OpenSession(tenantDbConfig)\n\tdefer mongo.CloseSession(session)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tendpointCollection := session.DB(tenantDbConfig.Db).C(statusEndpoints)\n\n\t// Query the detailed status endpoints trend results\n\treportID, err := mongo.GetReportID(session, tenantDbConfig.Db, vars[\"report_name\"])\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\tstartDate, endDate, err := getDateRange(urlValues)\n\tif err != nil {\n\t\tcode = http.StatusBadRequest\n\t\toutput, _ = respond.MarshalContent(respond.ErrBadRequestDetails(err.Error()), contentType, \"\", \" \")\n\t\treturn code, h, output, err\n\t}\n\n\tlimit := -1\n\tlimStr := urlValues.Get(\"top\")\n\tif limStr != \"\" {\n\t\tlimit, err = strconv.Atoi(limStr)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\t}\n\n\tgranularity := urlValues.Get(\"granularity\")\n\n\t// query for endpoints\n\tfilter := bson.M{\"report\": reportID, \"date\": bson.M{\"$gte\": startDate, \"$lte\": endDate}}\n\n\t// apply query for bucketed monthly results if granularity is set to monthly\n\tif granularity == \"monthly\" {\n\n\t\tresults := []StatusMonthEndpointData{}\n\n\t\tquery := []bson.M{\n\t\t\t{\"$match\": filter},\n\t\t\t{\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\n\t\t\t\t\t\"month\": bson.M{\"$substr\": list{\"$date\", 0, 6}},\n\t\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\t\"service\": \"$service\",\n\t\t\t\t\t\"endpoint\": \"$endpoint\",\n\t\t\t\t\t\"status\": \"$status\"},\n\t\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t\t}},\n\t\t\t{\"$sort\": bson.D{{\"_id.month\", 1}, {\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t\t{\n\t\t\t\t\"$group\": bson.M{\n\t\t\t\t\t\"_id\": bson.M{\"month\": \"$_id.month\", \"status\": \"$_id.status\"},\n\t\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"service\": \"$_id.service\", \"endpoint\": \"$_id.endpoint\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t\t}\n\n\t\t// trim down the list in each month-bucket according to the limit parameter\n\t\tif limit > 0 {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t\t} else {\n\t\t\tquery = append(query, bson.M{\"$project\": bson.M{\"date\": bson.M{\"$concat\": list{bson.M{\"$substr\": list{\"$_id.month\", 0, 4}},\n\t\t\t\t\"-\", bson.M{\"$substr\": list{\"$_id.month\", 4, 6}}}},\n\t\t\t\t\"status\": \"$_id.status\",\n\t\t\t\t\"top\": 
\"$top\"}})\n\t\t}\n\n\t\t// sort end results by month bucket ascending\n\t\tquery = append(query, bson.M{\"$sort\": bson.D{{\"date\", 1}, {\"status\", 1}}})\n\n\t\terr = endpointCollection.Pipe(query).All(&results)\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn code, h, output, err\n\t\t}\n\n\t\toutput, err = createStatusMonthEndpointListView(results, \"Success\", 200)\n\n\t\treturn code, h, output, err\n\n\t}\n\n\t// continue by calculating non monthly bucketed results\n\tresults := []StatusGroupEndpointData{}\n\n\tquery := []bson.M{\n\t\t{\"$match\": filter},\n\t\t{\"$group\": bson.M{\n\t\t\t\"_id\": bson.M{\n\t\t\t\t\"group\": \"$group\",\n\t\t\t\t\"service\": \"$service\",\n\t\t\t\t\"endpoint\": \"$endpoint\",\n\t\t\t\t\"status\": \"$status\"},\n\t\t\t\"duration\": bson.M{\"$sum\": \"$duration\"},\n\t\t}},\n\t\t{\"$sort\": bson.D{{\"_id.status\", 1}, {\"duration\", -1}}},\n\t\t{\n\t\t\t\"$group\": bson.M{\n\t\t\t\t\"_id\": bson.M{\"status\": \"$_id.status\"},\n\t\t\t\t\"top\": bson.M{\"$push\": bson.M{\"group\": \"$_id.group\", \"service\": \"$_id.service\", \"endpoint\": \"$_id.endpoint\", \"status\": \"$_id.status\", \"duration\": \"$duration\"}}}},\n\t}\n\n\t// trim down the list in each month-bucket according to the limit parameter\n\tif limit > 0 {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": bson.M{\"$slice\": list{\"$top\", limit}}}})\n\t} else {\n\t\tquery = append(query, bson.M{\"$project\": bson.M{\"status\": \"$_id.status\",\n\t\t\t\"top\": \"$top\"}})\n\t}\n\n\t// sort end results by month bucket ascending\n\tquery = append(query, bson.M{\"$sort\": bson.D{{\"status\", 1}}})\n\n\terr = endpointCollection.Pipe(query).All(&results)\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn code, h, output, err\n\t}\n\n\toutput, err = createStatusEndpointListView(results, \"Success\", 200)\n\n\treturn code, h, output, err\n}",
"func (VS *Server) getAuditsByDates(c *gin.Context, from int64, to int64, class string, method string, operation string) []Audit {\n\tvar aud []Audit\n\terr := VS.DataBase.Find(&aud, bolthold.Where(\"Date\").Gt(from).And(\"Date\").Lt(to))\n\tVS.checkOperation(c, class, method, operation, err)\n\treturn aud\n}",
"func (r ReservationRepository) GetByDates(location int, statusID int, startDate time.Time, endDate time.Time) ([]interface{}, error) {\n\tvar sqlStm = `\n\t\t\t\tSELECT \ta.id, \n\t\t\t\t\t\ta.uid,\n\t\t\t\t\t\ta.date,\n\t\t\t\t\t\ta.time_limit,\n\t\t\t\t\t\ta.guests,\n\t\t\t\t\t\ta.timestamp, \n\t\t\t\t\t\ta.updated,\n\t\t\t\t\t\ta.client_info_id,\n\t\t\t\t\t\tb.first_name,\n\t\t\t\t\t\tb.last_name,\n\t\t\t\t\t\tb.email,\n\t\t\t\t\t\tb.phone,\n\t\t\t\t\t\ta.status,\n\t\t\t\t\t\tc.description,\n\t\t\t\t\t\tc.value,\n\t\t\t\t\t\ta.table_id,\n\t\t\t\t\t\td.name,\n\t\t\t\t\t\td.description,\n\t\t\t\t\t\td.img_url,\n\t\t\t\t\t\td.max_guests,\n\t\t\t\t\t\td.area_id,\n\t\t\t\t\t\tarea.name,\n\t\t\t\t\t\tarea.description,\n\t\t\t\t\t\tarea.img_url,\n\t\t\t\t\t\ta.location_id,\n\t\t\t\t\t\te.name,\n\t\t\t\t\t\te.business_id\n\t\t\t\tFROM \t\t\treservations_reservation a\n\t\t\t\t\tINNER JOIN \treservations_client_info b ON a.client_info_id = b.id\n\t\t\t\t\tINNER JOIN \treservations_reservation_status c ON a.status = c.id\n\t\t\t\t\tINNER JOIN \treservations_table d ON a.table_id = d.id\n\t\t\t\t\tINNER JOIN \treservations_area area ON d.area_id = area.id\n\t\t\t\t\tINNER JOIN \treservations_location e ON a.location_id = e.id\n\t\t\t\tWHERE \ta.location_id = $1\n\t\t\t\t\tAND (SELECT CASE WHEN ( $2 <> 0 )\n\t\t\t\t\t\t\tTHEN ( a.status = $2 )\n\t\t\t\t\t\t\tELSE ( a.status > 0 ) END )\n\t\t\t\t\tAND a.date BETWEEN $3 AND $4`\n\n\tvar objects []models.ReservationModel\n\n\trows, err := r.DB.Query(sqlStm, location, statusID, startDate, endDate)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s\", err)\n\t}\n\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\treservation := models.ReservationModel{\n\t\t\tClientInfo: models.ClientInfoModel{},\n\t\t\tLocation: models.LocationModel{},\n\t\t\tTable: models.TableModel{\n\t\t\t\tArea: models.AreaModel{},\n\t\t\t},\n\t\t\tStatus: models.ReservationStatusModel{},\n\t\t}\n\n\t\tif err = rows.Scan(\n\t\t\t&reservation.ID,\n\t\t\t&reservation.UID,\n\t\t\t&reservation.Date,\n\t\t\t&reservation.TimeLimit,\n\t\t\t&reservation.Guests,\n\t\t\t&reservation.Timestamp,\n\t\t\t&reservation.Updated,\n\t\t\t&reservation.ClientInfo.ID,\n\t\t\t&reservation.ClientInfo.FirstName,\n\t\t\t&reservation.ClientInfo.LastName,\n\t\t\t&reservation.ClientInfo.Email,\n\t\t\t&reservation.ClientInfo.Phone,\n\t\t\t&reservation.Status.ID,\n\t\t\t&reservation.Status.Description,\n\t\t\t&reservation.Status.Value,\n\t\t\t&reservation.Table.ID,\n\t\t\t&reservation.Table.Name,\n\t\t\t&reservation.Table.Description,\n\t\t\t&reservation.Table.ImgURL,\n\t\t\t&reservation.Table.MaxGuests,\n\t\t\t&reservation.Table.Area.ID,\n\t\t\t&reservation.Table.Area.Name,\n\t\t\t&reservation.Table.Area.Description,\n\t\t\t&reservation.Table.Area.ImgURL,\n\t\t\t&reservation.Location.ID,\n\t\t\t&reservation.Location.Name,\n\t\t\t&reservation.Location.BusinessID); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%s\", err)\n\t\t}\n\n\t\tobjects = append(objects, reservation)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"%s\", err)\n\t}\n\n\tintfObjects := make([]interface{}, len(objects))\n\n\tfor i, obj := range objects {\n\t\tintfObjects[i] = obj\n\t}\n\n\treturn intfObjects, nil\n}",
"func (mr *MockRoomStorageMockRecorder) GetRoomIDsByStatus(ctx, scheduler, status interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetRoomIDsByStatus\", reflect.TypeOf((*MockRoomStorage)(nil).GetRoomIDsByStatus), ctx, scheduler, status)\n}",
"func (m *MockRoomStorage) GetRoomIDsByStatus(ctx context.Context, scheduler string, status game_room.GameRoomStatus) ([]string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetRoomIDsByStatus\", ctx, scheduler, status)\n\tret0, _ := ret[0].([]string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllUniqueID return a list of unique id values in repository | func GetAllUniqueID(config *Config, repoID string, field string) ([]string, error) {
stmt := fmt.Sprintf(`SELECT %s
FROM eprint
WHERE %s IS NOT NULL
GROUP BY %s ORDER BY %s`,
field, field, field, field)
return sqlQueryStringIDs(config, repoID, stmt)
} | [
"func (dbobj PGSQLDB) GetUniqueList(t Tbl, keyName string) ([]bson.M, error) {\n\ttable := GetTable(t)\n\tkeyName = dbobj.escapeName(keyName)\n\tq := \"select distinct \" + keyName + \" from \" + table + \" ORDER BY \" + keyName\n\t//fmt.Printf(\"q: %s\\n\", q)\n\tvalues := make([]interface{}, 0)\n\treturn dbobj.getListDo(q, values)\n}",
"func UniqueIDs(ids []string) []string {\r\n\tseen := make(map[string]struct{}, len(ids))\r\n\tj := 0\r\n\tfor _, v := range ids {\r\n\t\tif _, ok := seen[v]; ok {\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tseen[v] = struct{}{}\r\n\t\tids[j] = v\r\n\t\tj++\r\n\t}\r\n\treturn ids[:j]\r\n}",
"func ids(n int) []string {\n\tvar uuids []string\n\tfor i := 0; i < n; i++ {\n\t\tuuids = append(uuids, uuid.New().String())\n\t}\n\treturn uuids\n}",
"func GetAllIds() []int {\n\tch := make(chan []int, 1)\n\tsuccess := func(s []int) {\n\t\tch <- s\n\t}\n\tmo().Call(\"getAllIds\", success)\n\treturn <-ch\n}",
"func (manager *UsersManager) UserIDList() []string {\n\ts := set.New()\n\tmanager.EachEntry(func(authyID string, publicKey string) {\n\t\ts.Add(authyID)\n\t})\n\n\treturn set.StringSlice(s)\n}",
"func (m MarvelGetCharactersResponse) IdList() []int {\n\tvar list []int\n\tfor _, user := range m.Data.Results {\n\t\tlist = append(list, user.ID)\n\t}\n\treturn list\n}",
"func(db *Persistence) GetUniqueSources() ([]string, error) {\n log.Debug(\"fetching unique sources from database...\")\n\n sources := []string{}\n\n query := `SELECT DISTINCT source FROM asset_data_timeseries`\n results, err := db.Session.Query(context.Background(), query)\n if err != nil {\n return sources, err\n }\n\n var source string\n for results.Next() {\n if err := results.Scan(&source); err != nil {\n log.Warn(fmt.Errorf(\"unable to scan data into local variables: %+v\", err))\n continue\n }\n sources = append(sources, source)\n }\n return sources, nil\n}",
"func (u *Unknown) IDs() []int64 {\n\tids := make(int64Slice, len(u.attributeTrails))\n\ti := 0\n\tfor id := range u.attributeTrails {\n\t\tids[i] = id\n\t\ti++\n\t}\n\tids.Sort()\n\treturn ids\n}",
"func (api *API) GetUIDs() ([]string, error) {\n\tvar ret []string\n\tvalues := url.Values{}\n\n\tvalues.Add(\"format\", \"json\")\n\tbody, _, err := api.call(\"GET\", \"/metadata/user\", values, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tif err = json.Unmarshal(body, &ret); err != nil {\n\t\treturn ret, err\n\t}\n\treturn ret, nil\n}",
"func (a *Archiver) GetUUIDs(where_tags bson.M) ([]string, error) {\n\treturn a.store.GetUUIDs(where_tags)\n}",
"func (m *SchedulingGroup) GetUserIds()([]string) {\n val, err := m.GetBackingStore().Get(\"userIds\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}",
"func (p *RequestParams) GetUniqueInts(key string) []int64 {\n\tints := []int64{}\n\n\tfor _, v := range p.Values[key] {\n\t\tif string(v) == \"\" {\n\t\t\tcontinue // ignore blank ints\n\t\t}\n\t\tvi, err := strconv.ParseInt(v, 10, 64)\n\t\tif err != nil {\n\t\t\tvi = 0\n\t\t}\n\n\t\t// Do not insert 0, or duplicate entries\n\t\tif vi > 0 && !contains(ints, vi) {\n\t\t\tints = append(ints, vi)\n\t\t}\n\t}\n\n\treturn ints\n}",
"func (t *TeamModel) ListAllIDs() ([]string, uint32, error) {\n\to := orm.NewOrm()\n\n\tsql := fmt.Sprintf(`SELECT \n id\nFROM\n %s;`, t.TableName())\n\tvar teamModels []TeamModel\n\t_, err := o.Raw(sql).QueryRows(&teamModels)\n\tif err != nil {\n\t\terrCode, err := sqlstatus.ParseErrorCode(err)\n\t\treturn nil, errCode, err\n\t}\n\n\tif len(teamModels) == 0 {\n\t\treturn []string{}, sqlstatus.SQLSuccess, nil\n\t}\n\n\t// get the user IDs\n\tvar teamIDStr []string\n\tfor _, um := range teamModels {\n\t\tteamIDStr = append(teamIDStr, um.TeamID)\n\t}\n\treturn teamIDStr, sqlstatus.SQLSuccess, nil\n}",
"func distinctObjectIDs(input []primitive.ObjectID) []primitive.ObjectID {\n\tu := make([]primitive.ObjectID, 0, len(input))\n\tm := make(map[primitive.ObjectID]bool)\n\n\tfor _, val := range input {\n\t\tif _, ok := m[val]; !ok {\n\t\t\tm[val] = true\n\t\t\tu = append(u, val)\n\t\t}\n\t}\n\n\treturn u\n}",
"func makeUUIDList(n int) []string {\n\toutput := make([]string, n)\n\tfor i := 0; i < n; i++ {\n\t\toutput[i] = randomUUID()\n\t}\n\treturn output\n}",
"func (wikidata Wikidata) PUIDs() []string {\n\tvar puids []string\n\tfor _, puid := range wikidata.PRONOM {\n\t\tpuids = append(puids, puid)\n\t}\n\treturn puids\n}",
"func (svc *tagSvcImpl) GetAll() ([]string, *Error) {\n\tres, err := r.Table(\"Tags\").Run(svc.session)\n\tif err != nil {\n\t\treturn nil, NewError(ErrDB, err)\n\t}\n\n\ttags := []*tag{}\n\terr = res.All(&tags)\n\tif err != nil {\n\t\treturn nil, NewError(ErrDB, err)\n\t}\n\n\tresult := make([]string, len(tags))\n\tfor i, s := range tags {\n\t\tresult[i] = s.ID\n\t}\n\n\treturn result, nil\n}",
"func RepositoryGetAll() ([]CatData, error) {\n\trows, err := DB.Query(selectAllQuery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tretrieved := []CatData{}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar entity CatData\n\t\terr = rows.Scan(\n\t\t\t&entity.ID,\n\t\t\t&entity.Name,\n\t\t\t&entity.Sex,\n\t\t\t&entity.Breed,\n\t\t\t&entity.Color,\n\t\t\t&entity.Age,\n\t\t\t&entity.ImageURL,\n\t\t)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tretrieved = append(retrieved, entity)\n\t}\n\n\treturn retrieved, nil\n}",
"func AllObservationSetIDs(db orm.DB) ([]int, error) {\n\tvar setIds []int\n\n\terr := db.Model(&ObservationSet{}).ColumnExpr(\"array_agg(id)\").Select(pg.Array(&setIds))\n\tif err == pg.ErrNoRows {\n\t\treturn make([]int, 0), nil\n\t} else if err != nil {\n\t\treturn nil, PTOWrapError(err)\n\t}\n\n\tsort.Slice(setIds, func(i, j int) bool { return setIds[i] < setIds[j] })\n\n\treturn setIds, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsForUniqueID return list of eprints for DOI | func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {
// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique
stmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)
return sqlQueryIntIDs(config, repoID, stmt, value)
} | [
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func GetAllUniqueID(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s\nFROM eprint\nWHERE %s IS NOT NULL\nGROUP BY %s ORDER BY %s`,\n\t\tfield, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func (u *Unknown) IDs() []int64 {\n\tids := make(int64Slice, len(u.attributeTrails))\n\ti := 0\n\tfor id := range u.attributeTrails {\n\t\tids[i] = id\n\t\ti++\n\t}\n\tids.Sort()\n\treturn ids\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func (n *Notes) IDs() []int64 {\n\tids := make([]int64, len(n.Notes))\n\tfor i, note := range n.Notes {\n\t\tids[i] = note.ID\n\t}\n\treturn ids\n}",
"func (seMap *StringElementMap) PrintJustIdentifiers(hl *Transaction) {\n\tseMap.TraceableLock()\n\tdefer seMap.TraceableUnlock()\n\tfor uuid := range seMap.elementMap {\n\t\tlog.Printf(\"UUID: %s \\n\", uuid)\n\t}\n}",
"func (db *MongoDBRooms) GetIdeaIDs(roomID string) ([]string, error) {\n\n\tvar room RoomStruct\n\tvar ideas []string\n\n\tsession, err := mgo.Dial(db.HOST.URI)\n\tif err != nil {\n\t\treturn ideas, errors.New(\"error dialing the database\")\n\t}\n\tdefer session.Close()\n\n\tfind := bson.M{\"_id\": bson.ObjectIdHex(roomID)}\n\terr = session.DB(db.HOST.NAME).C(db.COLLECTION).Find(find).One(&room)\n\tif err != nil {\n\t\treturn ideas, errors.New(\"error finding idea ids\")\n\t}\n\tfor _, id := range room.IdeaIDs {\n\t\tideas = append(ideas, id)\n\t}\n\n\treturn ideas, nil\n}",
"func (o *OutputStream) EnrollmentIDs() []string {\n\tduplicates := map[string]interface{}{}\n\tvar eIDs []string\n\tfor _, output := range o.outputs {\n\t\tif len(output.EnrollmentID) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := duplicates[output.EnrollmentID]; !ok {\n\t\t\teIDs = append(eIDs, output.EnrollmentID)\n\t\t\tduplicates[output.EnrollmentID] = true\n\t\t}\n\t}\n\treturn eIDs\n}",
"func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id\nWHERE %s_id IS NOT NULL\nGROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func (o GetEnterpriseProxyAccessesResultOutput) Ids() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetEnterpriseProxyAccessesResult) []string { return v.Ids }).(pulumi.StringArrayOutput)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func getNodeIDs(ep endpoint.EndpointUpdater, policy *policy.L4Policy) []string {\n\tnodeIDs := make([]string, 0, 1)\n\tif ep.HasSidecarProxy() {\n\t\t// Istio sidecars have the Cilium bpf metadata filter\n\t\t// statically configured running the NPDS client, so\n\t\t// we may unconditionally wait for ACKs from the\n\t\t// sidecars.\n\t\t// Sidecar's IPv4 address is used as the node ID.\n\t\tipv4 := ep.GetIPv4Address()\n\t\tif ipv4 == \"\" {\n\t\t\tlog.Error(\"Envoy: Sidecar proxy has no IPv4 address\")\n\t\t} else {\n\t\t\tnodeIDs = append(nodeIDs, ipv4)\n\t\t}\n\t} else {\n\t\t// Host proxy uses \"127.0.0.1\" as the nodeID\n\t\tnodeIDs = append(nodeIDs, \"127.0.0.1\")\n\t}\n\t// Require additional ACK from proxylib if policy has proxylib redirects\n\t// Note that if a previous policy had a proxylib redirect and this one does not,\n\t// we only wait for the ACK from the main Envoy node ID.\n\tif policy.HasProxylibRedirect() {\n\t\t// Proxylib uses \"127.0.0.2\" as the nodeID\n\t\tnodeIDs = append(nodeIDs, \"127.0.0.2\")\n\t}\n\treturn nodeIDs\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllPersonOrOrgIDs return a list of creator ids or error | func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {
stmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id
WHERE %s_id IS NOT NULL
GROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)
return sqlQueryStringIDs(config, repoID, stmt)
} | [
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetAllORCIDs(config *Config, repoID string) ([]string, error) {\n\tvalues, err := sqlQueryStringIDs(config, repoID, `SELECT creators_orcid\n FROM eprint_creators_orcid\n WHERE creators_orcid IS NOT NULL\n GROUP BY creators_orcid ORDER BY creators_orcid`)\n\treturn values, err\n}",
"func GetAllPersonNames(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT CONCAT(%s_family, \"/\", %s_given) AS %s\nFROM eprint_%s\nWHERE (%s_family IS NOT NULL) OR (%s_given IS NOT NULL)\nGROUP BY %s_family, %s_given ORDER BY %s_family, %s_given`,\n\t\tfield, field, field,\n\t\tfield, field, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func (d *driver) GetUserOrganizations(uid int) ([]organization.Organization, error) {\n\trows, err := d.db.Query(\"SELECT id, name, created_on, is_public,\"+\n\t\t\" (SELECT count(*) FROM member_of WHERE id=org_id),\"+\n\t\t\" (SELECT count(*) FROM team WHERE team.org_id=organization.id)\"+\n\t\t\" FROM organization JOIN member_of ON (id=member_of.org_id)\"+\n\t\t\" WHERE member_of.user_id=$1 AND is_deleted=false order by name\", uid)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to receive organizations from the db: %v\", err)\n\t\treturn nil, err\n\t}\n\n\torgs := make([]organization.Organization, 0)\n\tfor rows.Next() {\n\t\torg := organization.Organization{}\n\t\terr := rows.Scan(&org.ID, &org.Name, &org.CreatedOn, &org.IsPublic, &org.MemberCount, &org.TeamCount)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Received error scanning in data from database: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\torgs = append(orgs, org)\n\t}\n\n\treturn orgs, err\n}",
"func AuthorizeFindOrganizations(ctx context.Context, rs []*influxdb.Organization) ([]*influxdb.Organization, int, error) {\n\t// This filters without allocating\n\t// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\n\trrs := rs[:0]\n\tfor _, r := range rs {\n\t\t_, _, err := AuthorizeReadOrg(ctx, r.ID)\n\t\tif err != nil && errors.ErrorCode(err) != errors.EUnauthorized {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tif errors.ErrorCode(err) == errors.EUnauthorized {\n\t\t\tcontinue\n\t\t}\n\t\trrs = append(rrs, r)\n\t}\n\treturn rrs, len(rrs), nil\n}",
"func getPlayerIdsFromRoster(r *mmlogic.Roster) []string {\n\tplayerIDs := make([]string, 0)\n\tfor _, p := range r.Players {\n\t\tplayerIDs = append(playerIDs, p.Id)\n\t}\n\treturn playerIDs\n\n}",
"func (m *ProjectMutation) CreatorIDs() (ids []int) {\n\tif id := m.creator; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}",
"func GetPermUserOrg(c echo.Context) (err error) {\n\tresult, err := models.GetPermOrg(dbs.DB, stringtoInt(c.Param(\"orgid\")))\n\ttools.PanicIf(err)\n\tpmusrs := make([]models.Permit, 0.0)\n\tpmusr := models.Permit{}\n\tfor result.Next() {\n\t\tresult.Scan(&pmusr.Organizationid, &pmusr.Email)\n\t\tfmt.Println(pmusr.Organizationid, pmusr.Email)\n\t\tpmusrs = append(pmusrs, pmusr)\n\t}\n\t//\tdbs.CloseDB(db)\n\treturn c.JSON(http.StatusOK, pmusrs)\n}",
"func (s *OrgSvc) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {\n\t// if im given a id or a name I know I can only return 1\n\tif filter.ID != nil || filter.Name != nil {\n\t\torg, err := s.FindOrganization(ctx, filter)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\treturn []*influxdb.Organization{org}, 1, nil\n\t}\n\n\tvar orgs []*influxdb.Organization\n\n\tif filter.UserID != nil {\n\t\t// find urms for orgs with this user\n\t\turms, _, err := s.svc.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{\n\t\t\tUserID: *filter.UserID,\n\t\t\tResourceType: influxdb.OrgsResourceType,\n\t\t}, opt...)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\t// find orgs by the urm's resource ids.\n\t\tfor _, urm := range urms {\n\t\t\to, err := s.FindOrganizationByID(ctx, urm.ResourceID)\n\t\t\tif err == nil {\n\t\t\t\t// if there is an error then this is a crufty urm and we should just move on\n\t\t\t\torgs = append(orgs, o)\n\t\t\t}\n\t\t}\n\n\t\treturn orgs, len(orgs), nil\n\t}\n\n\terr := s.store.View(ctx, func(tx kv.Tx) error {\n\t\tos, err := s.store.ListOrgs(ctx, tx, opt...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\torgs = os\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn orgs, len(orgs), err\n}",
"func (i *Interactor) getUsersIDs(ml []*Member) []string {\n\tids := make([]string, 0, len(ml))\n\tfor _, m := range ml {\n\t\tids = append(ids, m.UserID)\n\t}\n\treturn ids\n}",
"func (t *TeamModel) ListIDs(creatorId int32, dataselect *common.DataSelectQuery) ([]string, uint32, error) {\n\to := orm.NewOrm()\n\n\tsql := fmt.Sprintf(`SELECT \n id\nFROM\n %s where creator_id = %d and %s;`, t.TableName(), creatorId, dataselect.FilterQuery)\n\tvar teamModels []TeamModel\n\t_, err := o.Raw(sql).QueryRows(&teamModels)\n\tif err != nil {\n\t\terrCode, err := sqlstatus.ParseErrorCode(err)\n\t\treturn nil, errCode, err\n\t}\n\n\tif len(teamModels) == 0 {\n\t\treturn []string{}, sqlstatus.SQLSuccess, nil\n\t}\n\n\t// get the user IDs\n\tvar teamIDStr []string\n\tfor _, um := range teamModels {\n\t\tteamIDStr = append(teamIDStr, um.TeamID)\n\t}\n\treturn teamIDStr, sqlstatus.SQLSuccess, nil\n}",
"func (r *OrgUserRepository) GetUsersByOrgID(orgID string) []models.OrgUser {\n\tvar users []models.OrgUser\n\torgid := bson.ObjectIdHex(orgID)\n\titer := r.C.Find(bson.M{\"orgid\": orgid}).Iter()\n\tresult := models.OrgUser{}\n\tfor iter.Next(&result) {\n\t\tusers = append(users, result)\n\t}\n\treturn users\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func orgsForAllUsersToJoin(userOrgMap map[string][]string) ([]string, []error) {\n\tvar errors []error\n\tfor userPattern, orgs := range userOrgMap {\n\t\tif userPattern != \"*\" {\n\t\t\terrors = append(errors, fmt.Errorf(\"unsupported auth.userOrgMap user pattern %q (only \\\"*\\\" is supported)\", userPattern))\n\t\t\tcontinue\n\t\t}\n\t\treturn orgs, errors\n\t}\n\treturn nil, errors\n}",
"func (d *driver) GetOrganizationMembers(org string, admins bool) ([]string, error) {\n\tadminCheck := \"\"\n\tif admins {\n\t\tadminCheck = \" AND member_of.admin=true\"\n\t}\n\n\trows, err := d.db.Query(\n\t\t\"SELECT username FROM users, organization, member_of\"+\n\t\t\t\" WHERE users.id = member_of.user_id AND organization.id = member_of.org_id\"+\n\t\t\t\" AND organization.name = $1 AND is_deleted=false\"+\n\t\t\tadminCheck+\n\t\t\t\" ORDER BY username\", org)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to receive organization members from the db: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tusernames := make([]string, 0)\n\tfor rows.Next() {\n\t\tvar username string\n\t\terr := rows.Scan(&username)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Received error scanning in data from database: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tusernames = append(usernames, username)\n\t}\n\n\treturn usernames, nil\n}",
"func (e ServerEdges) OwnersOrErr() ([]*User, error) {\n\tif e.loadedTypes[0] {\n\t\treturn e.Owners, nil\n\t}\n\treturn nil, &NotLoadedError{edge: \"owners\"}\n}",
"func (manager *UsersManager) UserIDList() []string {\n\ts := set.New()\n\tmanager.EachEntry(func(authyID string, publicKey string) {\n\t\ts.Add(authyID)\n\t})\n\n\treturn set.StringSlice(s)\n}",
"func GetAllPerson() ([]models.Person, error) {\n\tvar p []models.Person\n\n\terr := Client.Select(\"id, name, lastname, age, dni\").Model(&models.Person{}).Find(&p).Error\n\n\tif err != nil {\n\t\treturn p, err\n\t}\n\treturn p, nil\n}",
"func (o *ViewUserGroups) GetUserIdsOk() (*[]int32, bool) {\n\tif o == nil || o.UserIds == nil {\n\t\treturn nil, false\n\t}\n\treturn o.UserIds, true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsForPersonOrOrgID returns a list of eprint ids associated with the person or organization id | func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {
stmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid
FROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)
WHERE eprint_%s_id.%s_id = ?
ORDER BY date_year DESC, date_month DESC, date_day DESC`,
personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)
return sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)
} | [
"func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id\nWHERE %s_id IS NOT NULL\nGROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func (o *Resource) GetOrgID() string {\n\tif o == nil || o.OrgID.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.OrgID.Get()\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetOrgID(t jwt.Token) string {\n\tclaims := t.PrivateClaims()\n\torgID, ok := claims[\"OrgID\"]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn orgID.(string)\n}",
"func (e Expense) getAdvancedPartyId(parties schema.PartiesCollection, bimpfEmployees Employees) string {\n\tif e.AdvancedByEmployee {\n\t\tbimpfEmployee, err := bimpfEmployees.ById(e.EmployeeId)\n\t\tif err != nil {\n\t\t\tlogrus.Warn(err)\n\t\t}\n\t\temployee, err := parties.EmployeeByIdentifier(bimpfEmployee.SbId)\n\t\tif err != nil {\n\t\t\tlogrus.Warn(err)\n\t\t}\n\t\treturn employee.Id\n\t}\n\treturn \"\"\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {\n\treturn orgID\n}",
"func GetOrgByID(orgs []*Organization, id int) (*Organization, error) {\n\tfor _, org := range orgs {\n\t\tif org.ID == id {\n\t\t\treturn org, nil\n\t\t}\n\t}\n\treturn &Organization{}, errors.New(\"GetOrgByID: ID out of range\")\n}",
"func GetOrgACMPolicyID(t testing.TB, orgID string) string {\n\tfilter := fmt.Sprintf(\"parent:organizations/%s\", orgID)\n\tid := gcloud.Runf(t, \"access-context-manager policies list --organization %s --filter %s --quiet\", orgID, filter).Array()\n\tif len(id) == 0 {\n\t\treturn \"\"\n\t}\n\treturn GetLastSplitElement(id[0].Get(\"name\").String(), \"/\")\n}",
"func GetInternalOrgId() string {\n\tv := GetEnv(\"INTERNAL_ORG_ID\", \"wiserskills\")\n\treturn v\n}",
"func (e *EUICC) Eid(ctx context.Context) (string, error) {\n\treturn e.getStringProperty(ctx, hermesconst.EuiccPropertyEid)\n}",
"func getOrgHREFById(vcdClient *VCDClient, orgId string) (string, error) {\n\torgListHREF := vcdClient.Client.VCDHREF\n\torgListHREF.Path += \"/org\"\n\n\torgList := new(types.OrgList)\n\n\t_, err := vcdClient.Client.ExecuteRequest(orgListHREF.String(), http.MethodGet,\n\t\t\"\", \"error retrieving org list: %s\", nil, orgList)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\torgUuid, err := getBareEntityUuid(orgId)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// Look for org UUID within OrgList\n\tfor _, org := range orgList.Org {\n\t\t// ID in orgList is usually empty. We extract the UUID from HREF to make the comparison\n\t\tuuidFromHref, err := GetUuidFromHref(org.HREF, true)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif uuidFromHref == orgUuid {\n\t\t\treturn org.HREF, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"couldn't find org with ID: %s\", orgId)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func (ji *Issue) OrganizationID() (param string) {\n\treturn param\n}",
"func (cc CodeClimate) GetOwnOrgID(orgname string) (string, error) {\n\tvar response struct {\n\t\tData []struct {\n\t\t\tID string\n\t\t\tAttributes struct {\n\t\t\t\tName string\n\t\t\t}\n\t\t}\n\t}\n\n\trowdata, err := cc.doRequest(\"GET\", \"orgs\", nil, &response)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, data := range response.Data {\n\t\tif data.Attributes.Name == orgname {\n\t\t\treturn data.ID, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"org ID `%s` not found in response data\\nRESPONSE:\\n%v\", orgname, rowdata)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllORCIDs returns a list of all ORCIDs in the repository | func GetAllORCIDs(config *Config, repoID string) ([]string, error) {
values, err := sqlQueryStringIDs(config, repoID, `SELECT creators_orcid
FROM eprint_creators_orcid
WHERE creators_orcid IS NOT NULL
GROUP BY creators_orcid ORDER BY creators_orcid`)
return values, err
} | [
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func (s *Service) FindAll() ([]*Company, error) {\n\tcompanies, err := s.repo.FindAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn companies, err\n}",
"func (s *OrganizationsService) ListAll(ctx context.Context) ([]*Organization, *http.Response, error) {\n\to := \"account/organizations\"\n\n\treq, err := s.client.NewRequest(\"GET\", o, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar orgs []*Organization\n\tresp, err := s.client.Do(ctx, req, &orgs)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn orgs, resp, nil\n}",
"func (m *DefaultReader) ListOrgs() ([]cfclient.Org, error) {\n\terr := m.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlo.G.Debug(\"Total orgs returned :\", len(m.orgs))\n\treturn m.orgs, nil\n}",
"func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id\nWHERE %s_id IS NOT NULL\nGROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func (r *RidesDAO) FindAllByCustomerId(cid string) ([]Ride, error){\n\tvar rides []Ride\n\terr := db.C(COLLECTION).Find(bson.M{\"customerID\": cid}).All(&rides)\n\treturn rides, err\n}",
"func ListCAs(c context.Context) ([]string, error) {\n\tkeys := []*ds.Key{}\n\tq := ds.NewQuery(\"CA\").Eq(\"Removed\", false).KeysOnly(true)\n\tif err := ds.GetAll(c, q, &keys); err != nil {\n\t\treturn nil, transient.Tag.Apply(err)\n\t}\n\tnames := make([]string, len(keys))\n\tfor i, key := range keys {\n\t\tnames[i] = key.StringID()\n\t}\n\treturn names, nil\n}",
"func FindCitoyens(c *gin.Context) {\n\tvar citoyens []models.Citoyen\n\tmodels.DB.Preload(\"Rang\").Find(&citoyens)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": citoyens})\n}",
"func RepositoryGetAll() ([]CatData, error) {\n\trows, err := DB.Query(selectAllQuery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tretrieved := []CatData{}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar entity CatData\n\t\terr = rows.Scan(\n\t\t\t&entity.ID,\n\t\t\t&entity.Name,\n\t\t\t&entity.Sex,\n\t\t\t&entity.Breed,\n\t\t\t&entity.Color,\n\t\t\t&entity.Age,\n\t\t\t&entity.ImageURL,\n\t\t)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tretrieved = append(retrieved, entity)\n\t}\n\n\treturn retrieved, nil\n}",
"func (m *DefaultManager) ListOrgs() ([]cfclient.Org, error) {\n\torgs, err := m.Client.ListOrgs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlo.G.Debug(\"Total orgs returned :\", len(orgs))\n\treturn orgs, nil\n}",
"func GetAllDistribs(c *gin.Context) {\n\tdb := c.MustGet(\"db\").(*gorm.DB)\n\tvar mainDistribs []entity.MainDistributor\n\tdb.Find(&mainDistribs)\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": mainDistribs})\n}",
"func AuthorizeFindOrganizations(ctx context.Context, rs []*influxdb.Organization) ([]*influxdb.Organization, int, error) {\n\t// This filters without allocating\n\t// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating\n\trrs := rs[:0]\n\tfor _, r := range rs {\n\t\t_, _, err := AuthorizeReadOrg(ctx, r.ID)\n\t\tif err != nil && errors.ErrorCode(err) != errors.EUnauthorized {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tif errors.ErrorCode(err) == errors.EUnauthorized {\n\t\t\tcontinue\n\t\t}\n\t\trrs = append(rrs, r)\n\t}\n\treturn rrs, len(rrs), nil\n}",
"func (r *MongoRepository) ReadAll() ([]auto.Entity, error) {\n\tcursor, err := r.db.Collection.Find(\n\t\tcontext.Background(),\n\t\tbson.M{},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar (\n\t\tv auto.Entity\n\t\tvv []auto.Entity\n\t)\n\tfor cursor.Next(context.TODO()) {\n\t\terr = cursor.Decode(&v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvv = append(vv, v)\n\t}\n\n\treturn vv, nil\n}",
"func (r RepositoryServiceOp) ListCis(n string) (CiList, error) {\n\n\tvar err error\n\tvar ciList []CiListEntry\n\n\turl := repositoryBasePath + \"/\" + \"query\" + \"?ancestor=/\" + n\n\n\treq, err := r.client.NewRequest(url, \"GET\", nil)\n\n\tresp, err := r.client.Do(req, &ciList)\n\n\tif err != nil {\n\t\treturn ciList, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn ciList, nil\n}",
"func getAuthorities(contractID string) ([]string, error) {\n\tauthorityResponse, err := dnsv2.GetAuthorities(contractID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getAuthorities - contractid %s: authorities retrieval failed. Error: %s\",\n\t\t\tcontractID, err.Error())\n\t}\n\tcontracts := authorityResponse.Contracts\n\tif len(contracts) != 1 {\n\t\treturn nil, fmt.Errorf(\"getAuthorities - contractid %s: Expected 1 element in array but got %d\",\n\t\t\tcontractID, len(contracts))\n\t}\n\tcid := contracts[0].ContractID\n\tif cid != contractID {\n\t\treturn nil, fmt.Errorf(\"getAuthorities - contractID %s: got authorities for wrong contractID (%s)\",\n\t\t\tcontractID, cid)\n\t}\n\tauthorities := contracts[0].Authorities\n\treturn authorities, nil\n}",
"func (i *Identity) GetAllIdentities(caname string, cb func(*json.Decoder) error) error {\n\tlog.Debugf(\"Entering identity.GetAllIdentities\")\n\tqueryParam := make(map[string]string)\n\tqueryParam[\"ca\"] = caname\n\terr := i.GetStreamResponse(\"identities\", queryParam, \"result.identities\", cb)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"Successfully retrieved identities\")\n\treturn nil\n}",
"func ListConstituentsCouncillorsUnauthorized(t *testing.T, ctx context.Context, service *goa.Service, ctrl app.CouncillorsController, id string) http.ResponseWriter {\n\t// Setup service\n\tvar (\n\t\tlogBuf bytes.Buffer\n\t\tresp interface{}\n\n\t\trespSetter goatest.ResponseSetterFunc = func(r interface{}) { resp = r }\n\t)\n\tif service == nil {\n\t\tservice = goatest.Service(&logBuf, respSetter)\n\t} else {\n\t\tlogger := log.New(&logBuf, \"\", log.Ltime)\n\t\tservice.WithLogger(goa.NewLogger(logger))\n\t\tnewEncoder := func(io.Writer) goa.Encoder { return respSetter }\n\t\tservice.Encoder = goa.NewHTTPEncoder() // Make sure the code ends up using this decoder\n\t\tservice.Encoder.Register(newEncoder, \"*/*\")\n\t}\n\n\t// Setup request context\n\trw := httptest.NewRecorder()\n\tu := &url.URL{\n\t\tPath: fmt.Sprintf(\"/councillors/%v/consituents\", id),\n\t}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tpanic(\"invalid test \" + err.Error()) // bug\n\t}\n\tprms := url.Values{}\n\tprms[\"id\"] = []string{fmt.Sprintf(\"%v\", id)}\n\tif ctx == nil {\n\t\tctx = context.Background()\n\t}\n\tgoaCtx := goa.NewContext(goa.WithAction(ctx, \"CouncillorsTest\"), rw, req, prms)\n\tlistConstituentsCtx, err := app.NewListConstituentsCouncillorsContext(goaCtx, service)\n\tif err != nil {\n\t\tpanic(\"invalid test data \" + err.Error()) // bug\n\t}\n\n\t// Perform action\n\terr = ctrl.ListConstituents(listConstituentsCtx)\n\n\t// Validate response\n\tif err != nil {\n\t\tt.Fatalf(\"controller returned %s, logs:\\n%s\", err, logBuf.String())\n\t}\n\tif rw.Code != 401 {\n\t\tt.Errorf(\"invalid response status code: got %+v, expected 401\", rw.Code)\n\t}\n\n\t// Return results\n\treturn rw\n}",
"func (c *organizations) List() ([]types.Organization, error) {\n\tpath := \"/v3/organizations\"\n\tout := make([]types.Organization, 0)\n\treturn out, c.client.Get(path, &out)\n}",
"func GetAllCompanies(c *fiber.Ctx) error {\n\tdb := database.DB\n\tvar companies []model.Company\n\tdb.Find(&companies)\n\treturn c.JSON(fiber.Map{\"status\": \"success\", \"message\": \"All companies\", \"data\": companies})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsForORCID returns a list of eprint ids associated with the ORCID | func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {
return sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid
FROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)
WHERE creators_orcid = ?
ORDER BY date_year DESC, date_month DESC, date_day DESC
`, orcid)
} | [
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetAllORCIDs(config *Config, repoID string) ([]string, error) {\n\tvalues, err := sqlQueryStringIDs(config, repoID, `SELECT creators_orcid\n FROM eprint_creators_orcid\n WHERE creators_orcid IS NOT NULL\n GROUP BY creators_orcid ORDER BY creators_orcid`)\n\treturn values, err\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id\nWHERE %s_id IS NOT NULL\nGROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func (c *Client) FindIrQwebFieldContactIds(criteria *Criteria, options *Options) ([]int64, error) {\n\tids, err := c.Search(IrQwebFieldContactModel, criteria, options)\n\tif err != nil {\n\t\treturn []int64{}, err\n\t}\n\treturn ids, nil\n}",
"func (s *RevListScanner) OID() []byte { return s.oid }",
"func (o *PostPagesPageIdIncidentsIncident) GetComponentIds() []string {\n\tif o == nil || o.ComponentIds == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn *o.ComponentIds\n}",
"func (m *Alert) GetIncidentIds()([]string) {\n return m.incidentIds\n}",
"func (this *AccountService) GetAllActivedCompanyIds() ([]bson.ObjectId, error) {\n\turl := this.AccountApiUrl + \"inapi/account/company_ids\"\n\tids := []bson.ObjectId{}\n\n\tidsInfo, err := session.Get(url, nil)\n\tif err != nil {\n\t\treturn ids, err\n\t}\n\n\toutput := &struct {\n\t\tErrCode int `json:\"errcode\"`\n\t\tErrMsg string `json:\"errmsg\"`\n\t\tData []bson.ObjectId `json:\"data,omitempty\"`\n\t}{}\n\n\terr = json.Unmarshal(idsInfo, output)\n\tif err != nil {\n\t\treturn ids, err\n\t}\n\n\tif output.ErrCode != tm.ERR_CODE_SUCCESS {\n\t\treturn ids, errors.New(output.ErrMsg)\n\t}\n\n\treturn output.Data, nil\n}",
"func (o GetServicePrincipalsResultOutput) ObjectIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetServicePrincipalsResult) []string { return v.ObjectIds }).(pulumi.StringArrayOutput)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func (s *Controller) ListRoomIDs() ([]string, error) {\n\trooms, err := s.repo.FindAllRooms(s.db)\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tvar roomIDs []string\n\tfor _, r := range rooms {\n\t\troomIDs = append(roomIDs, r.ID)\n\t}\n\n\treturn roomIDs, nil\n}",
"func (o *OutputStream) EnrollmentIDs() []string {\n\tduplicates := map[string]interface{}{}\n\tvar eIDs []string\n\tfor _, output := range o.outputs {\n\t\tif len(output.EnrollmentID) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := duplicates[output.EnrollmentID]; !ok {\n\t\t\teIDs = append(eIDs, output.EnrollmentID)\n\t\t\tduplicates[output.EnrollmentID] = true\n\t\t}\n\t}\n\treturn eIDs\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllItems returns a list of simple items (e.g. local_group) | func GetAllItems(config *Config, repoID string, field string) ([]string, error) {
stmt := fmt.Sprintf(`SELECT %s
FROM eprint_%s
WHERE eprint_%s.%s IS NOT NULL
GROUP BY eprint_%s.%s ORDER BY eprint_%s.%s`,
field, field, field, field, field, field, field, field)
return sqlQueryStringIDs(config, repoID, stmt)
} | [
"func GetAllItems() []*Item {\n\titems := []*Item{}\n\titemCol.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {\n\t\tvar item Item\n\t\tjson.Unmarshal(docContent, &item)\n\t\titem.ID = fmt.Sprintf(\"%d\", id)\n\t\titems = append(items, &item)\n\t\treturn true\n\t})\n\treturn items\n}",
"func (s *SmartContract) GetAllItems(ctx contractapi.TransactionContextInterface) ([]*Item, error) {\n\t// range query with empty string for startKey and endKey does an\n\t// open-ended query of all items in the chaincode namespace.\n\tresultsIterator, err := ctx.GetStub().GetStateByRange(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resultsIterator.Close()\n\n\tvar items []*Item\n\tfor resultsIterator.HasNext() {\n\t\tqueryResponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar item Item\n\t\terr = json.Unmarshal(queryResponse.Value, &item)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif item.Original {\n\t\t\titems = append(items, &item)\n\t\t}\n\n\t}\n\n\treturn items, nil\n}",
"func (m *MongoDBAccesser) GetAllItems() []Item {\n\tif m.connection == nil {\n\t\tlog.Fatal(\"DBAccesser is not connected to a database\")\n\t}\n\n\tf := bson.M{}\n\tcursor, err := m.connection.Database(m.dbName).Collection(\"items\").Find(context.TODO(), f)\n\tutils.FailOnError(err, \"Error getting all items\")\n\n\tvar records []bson.M\n\terr = cursor.All(context.TODO(), &records)\n\tutils.FailOnError(err, \"Error reading all items\")\n\n\titems := []Item{}\n\tfor _, r := range records {\n\t\titems = append(items, Item{\n\t\t\tID: r[\"_id\"].(primitive.ObjectID).Hex(),\n\t\t\tName: r[\"name\"].(string),\n\t\t})\n\t}\n\n\treturn items\n}",
"func (p ItemPresenter) ReadAllFullItems(filter usecase.ItemFilter) (string, error) {\n\tis, err := filter.PickUpFullItems()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn concatAllItems(is) + \"があります。\", nil\n}",
"func (env Env) GetAllItems(w http.ResponseWriter, r *http.Request) {\n\tJSONResponse(\"SUCCESS\", \"Implement to return food item list\", w)\n}",
"func (l *IOTHubEnrichmentList) GetItems() []resource.Managed {\n\titems := make([]resource.Managed, len(l.Items))\n\tfor i := range l.Items {\n\t\titems[i] = &l.Items[i]\n\t}\n\treturn items\n}",
"func (c *Chromium) GetAllItems() ([]data.Item, error) {\n\tvar items []data.Item\n\tfor item, choice := range chromiumItems {\n\t\tm, err := getItemPath(c.profilePath, choice.mainFile)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"%s find %s file failed, ERR:%s\", c.name, item, err)\n\t\t\tcontinue\n\t\t}\n\t\ti := choice.newItem(m, \"\")\n\t\tlog.Debugf(\"%s find %s File Success\", c.name, item)\n\t\titems = append(items, i)\n\t}\n\treturn items, nil\n}",
"func GetItems() []models.Item {\n\tdb := db.Connect()\n\tdefer db.Close()\n\tvar items []models.Item\n\terr := db.Model(&items).Select()\n\tif err != nil {\n\t\treturn items\n\t}\n\treturn items\n}",
"func GetItems(w http.ResponseWriter, r *http.Request) {\n\titems, err := models.LayerInstance().Item.GetAll()\n\n\tif err != nil {\n\t\trender.Render(w, r, payloads.ErrInternalError(err))\n\t\treturn\n\t}\n\n\tif err := render.RenderList(w, r, payloads.NewItemListResponse(items)); err != nil {\n\t\trender.Render(w, r, payloads.ErrRender(err))\n\t\treturn\n\t}\n}",
"func GetItemList(basicAuth BasicAuthInfo) RespGetItemList {\n\n\tvar targetURL string\n\tif basicAuth.IsSandbox {\n\t\ttargetURL = SANDBOXURL + URIITEM\n\t} else {\n\t\ttargetURL = PRODURL + URIITEM\n\t}\n\n\trand.Seed(time.Now().UnixNano())\n\tbody := map[string]string{}\n\tbody[\"appid\"] = basicAuth.AppId\n\tbody[\"gameid\"] = basicAuth.GameId\n\tbody[\"openid\"] = basicAuth.OpenId\n\tbody[\"openkey\"] = basicAuth.OpenKey\n\tbody[\"ts\"] = strconv.Itoa(MakeTimestamp())\n\tbody[\"rnd\"] = strconv.Itoa(rand.Intn(9999999))\n\tbody[\"cmd\"] = \"1\"\n\tbody[\"mask\"] = \"1\"\n\n\treqBodyStr := addSig(URIITEM, basicAuth.AppKey, &body)\n\n\t// 发送POST请求\n\tresp, err := http.Post(targetURL, \"application/x-www-form-urlencoded\", strings.NewReader(reqBodyStr))\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trespBody, _ := ioutil.ReadAll(resp.Body)\n\n\tret := RespGetItemList{}\n\tjson.Unmarshal(respBody, &ret)\n\n\treturn ret\n}",
"func (m *itemRepo) GetAll() []entity.Item {\n\titems := []entity.Item{}\n\tm.itemMap.Range(func(k, v interface{}) bool {\n\t\titem := v.(entity.Item)\n\t\titems = append(items, item)\n\t\treturn true\n\t})\n\n\t// Sort by name\n\tsort.SliceStable(items, func(i, j int) bool {\n\t\treturn items[i].Name < items[j].Name\n\t})\n\treturn items\n}",
"func (s *SGCollection) All(ctx context.Context) []SGItem {\n\tvar items []SGItem\n\tcursor, err := s.Mail.Find(ctx, bson.M{})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil\n\t}\n\n\tdefer cursor.Close(ctx)\n\tfor cursor.Next(ctx) {\n\t\tvar item SGItem\n\t\terr := cursor.Decode(&item)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\titems = append(items, item)\n\t}\n\n\tif err := cursor.Err(); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn items\n}",
"func GetAllMartItems(ctx context.Context, filter interface{}) ([]*model.Item, error) {\n\tvar items []*model.Item\n\titems = cache.GetCache(\"mart-data\")\n\tif len(items) > 0 {\n\t\tfmt.Println(len(items))\n\t\treturn items, nil\n\t}\n\n\tcur, err := Collection.Find(context.TODO(), filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer cur.Close(context.TODO())\n\tfor cur.Next(context.TODO()) {\n\t\tvar item model.Item\n\t\terr := cur.Decode(&item)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titems = append(items, &item)\n\t}\n\tif err := cur.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn items, nil\n}",
"func (handler *Handler) GetAllItems(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(handler.service.FindAll())\n\n}",
"func (l *IOTHubRouteList) GetItems() []resource.Managed {\n\titems := make([]resource.Managed, len(l.Items))\n\tfor i := range l.Items {\n\t\titems[i] = &l.Items[i]\n\t}\n\treturn items\n}",
"func List(ctx context.Context, dbc *sql.DB) ([]items.Item, error) {\n\ts, err := dbc.QueryContext(\n\t\tctx,\n\t\t\"select * from items\",\n\t)\n\tif err != nil {\n\t\tlog.Print(ctx, err)\n\t\treturn nil, err\n\t}\n\n\tvar res []items.Item\n\n\tfor s.Next() {\n\t\tvar i items.Item\n\n\t\terr = s.Scan(&i.Name, &i.Code, &i.Type, &i.Value, &i.InitDate)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres = append(res, i)\n\t}\n\n\treturn res, nil\n}",
"func GetAllFilteredItems(category string) []*Item {\n\tif category == \"\" {\n\t\treturn GetAllItems()\n\t}\n\n\titems := []*Item{}\n\tif len(cats.index[category]) == 0 {\n\t\treturn items\n\t}\n\n\tfor id := range cats.index[category] {\n\t\titem, _ := GetItem(id) // Ignore errors\n\t\titems = append(items, item)\n\t}\n\n\treturn items\n}",
"func (s *FakeStorage) GetItems() ([]Gif, error) {\n\tresult := make([]Gif, len(s.Gifs))\n\n\ti := 0\n\tfor _, item := range s.Gifs {\n\t\tresult[i] = item\n\t\ti++\n\t}\n\n\treturn result, nil\n}",
"func GetAll(w http.ResponseWriter, r *http.Request) {\n\trepo := Item{}\n\tvar e BaseRepository = repo\n\tvar items, err = e.FindAll()\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tvar item []Item\n\terr = json.Unmarshal(items, &item)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\trespondWithJson(w, http.StatusOK, item)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllPersonNames returns a list of person names in the repository | func GetAllPersonNames(config *Config, repoID string, field string) ([]string, error) {
stmt := fmt.Sprintf(`SELECT CONCAT(%s_family, "/", %s_given) AS %s
FROM eprint_%s
WHERE (%s_family IS NOT NULL) OR (%s_given IS NOT NULL)
GROUP BY %s_family, %s_given ORDER BY %s_family, %s_given`,
field, field, field,
field, field, field, field, field, field, field)
return sqlQueryStringIDs(config, repoID, stmt)
} | [
"func (pc *PersonController) GetPersonsByName(w http.ResponseWriter, r *http.Request) {\n\n\t// get the name parameter\n\tvars := mux.Vars(r)\n\tsearchValue := vars[\"name\"]\n\tif searchValue == \"\" {\n\t\trespondWithError(w, http.StatusBadRequest, \"missing search criteria\")\n\t\treturn\n\t}\n\n\t// adjust operator and predicate if neccessary\n\top, predicate, err := buildStringQueryComponents(searchValue)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, fmt.Sprintf(`{\"GetPersonsByName\": \"%s\"}`, err))\n\t\treturn\n\t}\n\n\t// build GetParam\n\tp := sqac.GetParam{\n\t\tFieldName: \"name\",\n\t\tOperand: op,\n\t\tParamValue: predicate,\n\t\tNextOperator: \"\",\n\t}\n\tparams := []sqac.GetParam{}\n\tparams = append(params, p)\n\n\t// build base Href; common for each selected row\n\turlString := buildHrefBasic(r, true)\n\n\t// call the common Person GetSet method\n\tpersons, count, countReq, err := pc.getPersonSet(w, r, params)\n\tif persons != nil && countReq == false {\n\t\tfor i, l := range persons {\n\t\t\tpersons[i].Href = urlString + \"person/\" + strconv.FormatUint(uint64(l.ID), 10)\n\t\t}\n\t\trespondWithJSON(w, http.StatusOK, persons)\n\t\treturn\n\t}\n\n\tif countReq == true {\n\t\trespondWithCount(w, http.StatusOK, count)\n\t\treturn\n\t}\n\trespondWithJSON(w, http.StatusOK, \"[]\")\n}",
"func (idb *InDB) GetAllPerson(context *gin.Context) {\n\tvar (\n\t\tpersons []structs.Person\n\t\tresult gin.H\n\t)\n\n\tidb.DB.Find(&persons)\n\tif len(persons) < 1 {\n\t\tresult = gin.H{\n\t\t\t\"result\": nil,\n\t\t\t\"count\": 0,\n\t\t\t\"message\": \"succeed get all data\",\n\t\t}\n\t} else {\n\t\tresult = gin.H{\n\t\t\t\"result\": persons,\n\t\t\t\"count\": len(persons),\n\t\t\t\"message\": \"succeed get all data\",\n\t\t}\n\t}\n\tcontext.JSON(http.StatusOK, result)\n}",
"func GetAllPerson() ([]models.Person, error) {\n\tvar p []models.Person\n\n\terr := Client.Select(\"id, name, lastname, age, dni\").Model(&models.Person{}).Find(&p).Error\n\n\tif err != nil {\n\t\treturn p, err\n\t}\n\treturn p, nil\n}",
"func GetAllPersons() ([]models.Person, error) {\n\tdatabase, err := getArangoDatabase()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tquery := \"FOR p IN persons RETURN p\"\n\tcursor, err := database.Query(nil, query, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpersons := []models.Person{}\n\tfor cursor.HasMore() {\n\t\tperson := new(models.Person)\n\t\tmeta, err := cursor.ReadDocument(nil, &person.Payload)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tperson.Key = meta.Key\n\t\tpersons = append(persons, *person)\n\t}\n\n\treturn persons, nil\n}",
"func (a *server) ListPeople(ctx context.Context, in *pb.Empty) (*pb.ListReplay, error) {\n\tlog.Println(\"List People\")\n\n\tpeople := []*pb.Person{}\n\ta.db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"People\"))\n\t\tc := b.Cursor()\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\t//fmt.Printf(\"key=%s, value=%s\\n\", k, v)\n\t\t\tp := pb.Person{}\n\t\t\terr := json.Unmarshal(v, &p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpeople = append(people, &p)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn &pb.ListReplay{People: people}, nil\n}",
"func (pc *PersonController) GetPersons(w http.ResponseWriter, r *http.Request) {\n\n\tvar persons []models.Person\n\tvar count uint64\n\tcountReq := false\n\n\t// build base Href; common for each selected row\n\turlString := buildHrefBasic(r, true)\n\n\t// call the common getPersonSet method\n\tpersons, count, countReq, err := pc.getPersonSet(w, r, nil)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, fmt.Sprintf(`{\"GetPersons\": \"%s\"}`, err))\n\t\treturn\n\t}\n\n\t// retrieved []Person and not asked to $count\n\tif persons != nil && countReq == false {\n\t\tfor i, l := range persons {\n\t\t\tpersons[i].Href = urlString + strconv.FormatUint(uint64(l.ID), 10)\n\t\t}\n\t\trespondWithJSON(w, http.StatusOK, persons)\n\t\treturn\n\t}\n\n\t// $count was requested, which trumps all other commands\n\tif countReq == true {\n\t\trespondWithCount(w, http.StatusOK, count)\n\t\treturn\n\t}\n\n\t// fallthrough and return nothing\n\trespondWithJSON(w, http.StatusOK, \"[]\")\n}",
"func (api *API) getUsernames(ctx *gin.Context) {\n\tusernames, err := api.database.GetAllUsernames()\n\tif api.check(err, ctx) {\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, gr(usernames))\n}",
"func GetNames(context interface{}, db *db.DB) ([]string, error) {\n\tlog.Dev(context, \"GetNames\", \"Started\")\n\n\tvar rawNames []struct {\n\t\tName string\n\t}\n\n\tkey := \"gns\"\n\tif v, found := cache.Get(key); found {\n\t\tnames := v.([]string)\n\t\tlog.Dev(context, \"GetNames\", \"Completed : CACHE : Sets[%d]\", len(names))\n\t\treturn names, nil\n\t}\n\n\tf := func(c *mgo.Collection) error {\n\t\ts := bson.M{\"name\": 1}\n\t\tlog.Dev(context, \"GetNames\", \"MGO : db.%s.find({}, %s).sort([\\\"name\\\"])\", c.Name, mongo.Query(s))\n\t\treturn c.Find(nil).Select(s).Sort(\"name\").All(&rawNames)\n\t}\n\n\tif err := db.ExecuteMGO(context, Collection, f); err != nil {\n\t\tif err == mgo.ErrNotFound {\n\t\t\terr = ErrNotFound\n\t\t}\n\n\t\tlog.Error(context, \"GetNames\", err, \"Completed\")\n\t\treturn nil, err\n\t}\n\n\tif rawNames == nil {\n\t\tlog.Error(context, \"GetNames\", ErrNotFound, \"Completed\")\n\t\treturn nil, ErrNotFound\n\t}\n\n\tnames := make([]string, len(rawNames))\n\tfor i := range rawNames {\n\t\tnames[i] = rawNames[i].Name\n\t}\n\n\tcache.Set(key, names, gc.DefaultExpiration)\n\n\tlog.Dev(context, \"GetNames\", \"Completed : Sets[%d]\", len(names))\n\treturn names, nil\n}",
"func ListPeople(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tlimit = handler.ToLimit(r)\n\t\toffset = handler.ToOffset(r)\n\t)\n\n\tpeople, err := datastore.ListPeople(ctx, limit, offset)\n\tif err != nil {\n\t\tlog.Printf(\"error: error listing people err=%q\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\trenderTemplate(ctx, w, \"person_list.tmpl\", M{\n\t\t\"People\": people,\n\t})\n}",
"func GetPersonsHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"content-type\", \"application/json\")\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\tclient, err := mongo.Connect(ctx, options.Client().ApplyURI(\n\t\t\"mongodb+srv://mongouser:[email protected]/test?retryWrites=true&w=majority\",\n\t))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tgoofDB := client.Database(\"test\")\n\tgoofCollection := goofDB.Collection(\"persons\")\n\n\tcursor, err := goofCollection.Find(ctx, bson.M{})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{ \"message\" : \"` + err.Error() + `\"}`))\n\t\treturn\n\t}\n\tdefer cursor.Close(ctx)\n\tvar persons []models.Person\n\tfor cursor.Next(ctx) {\n\t\tvar person models.Person\n\t\tcursor.Decode(&person)\n\t\tpersons = append(persons, person)\n\t}\n\tif err = cursor.Err(); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{ \"message\" : \"` + err.Error() + `\"}`))\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(persons)\n\n}",
"func getFilteredNames(ctx *cliContext) []string {\n\tnames := []string{}\n\tobjList := getNewObjList(ctx)\n\terr := ctx.restGetFunc(ctx.server, ctx.tenant, ctx.token, objList)\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting %ss: %v\", ctx.subcmd, err)\n\t\treturn names\n\t}\n\n\tfor idx := 0; idx < getNumItems(objList); idx++ {\n\t\tobj := getObjOrList(objList, idx)\n\t\tobjm, err := runtime.GetObjectMeta(obj)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Unable to fetch object meta from the object: %+v\", objList)\n\t\t\treturn names\n\t\t}\n\t\tif skipObj(ctx, objm) {\n\t\t\tcontinue\n\t\t}\n\t\tnames = append(names, objm.Name)\n\t}\n\n\treturn names\n}",
"func GetAllUserNames() []string {\n\treturn usernames\n}",
"func (s *Storage) ListPersons(ctx context.Context, options types.ListingOptions) ([]*personpb.Person, types.ListingMetas, error) {\n\n\tlist := []*personpb.Person{}\n\n\t// todo implement options.Fields\n\n\tres, err := searchengine.List(CollectionPersons, options, map[string]string{})\n\tif err == nil {\n\t\tfor _, hit := range res.Hits {\n\t\t\titem, e := s.GetPerson(ctx, hit.ID)\n\t\t\tif e == nil {\n\t\t\t\t// append to list\n\t\t\t\tlist = append(list, item)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn list, types.ListingMetas{NumOfRecordsForRequest: uint32(res.Total)}, err\n}",
"func LoadAllNamesByFromRepository(db gorp.SqlExecutor, projID int64, fromRepository string) (sdk.IDNames, error) {\n\tif fromRepository == \"\" {\n\t\treturn nil, sdk.WithData(sdk.ErrUnknownError, \"could not call LoadAllNamesByFromRepository with empty fromRepository\")\n\t}\n\tquery := `SELECT application.id, application.name\n\t\t\t FROM application\n\t\t\t WHERE project_id = $1 AND from_repository = $2\n\t\t\t ORDER BY application.name`\n\n\tvar res sdk.IDNames\n\tif _, err := db.Select(&res, query, projID, fromRepository); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn res, nil\n\t\t}\n\t\treturn nil, sdk.WrapError(err, \"application.LoadAllNamesByFromRepository\")\n\t}\n\n\treturn res, nil\n}",
"func (e Employees) Names() []string {\n\tvar names []string\n\tfor name, _ := range e.salaries {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}",
"func (v *View) getNames(containerID string) []string {\n\titer, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tvar names []string\n\tfor {\n\t\titem := iter.Next()\n\t\tif item == nil {\n\t\t\tbreak\n\t\t}\n\t\tnames = append(names, item.(nameAssociation).name)\n\t}\n\n\treturn names\n}",
"func (g *Generator) Names(count int) []string {\n\tvar names []string\n\tfor i := 0; i < count; i++ {\n\t\tfirst, middle, last := g.Name()\n\t\tif g.FlipCoin(50) {\n\t\t\tnames = append(names, first+\" \"+string(middle[0])+\". \"+last)\n\t\t} else {\n\t\t\tnames = append(names, first+\" \"+last)\n\t\t}\n\t}\n\treturn names\n}",
"func (p *Person) Children(name string) ([]string, error) {\n query := fmt.Sprintf(\"MATCH (n:Person)-[:PARENT]->(m) WHERE n.name = '%s' RETURN DISTINCT collect(m.name) as names;\", name)\n peopleNames, err := p.DB.Session.WriteTransaction(func(transaction neo4j.Transaction) (interface{}, error) {\n result, err := transaction.Run(\n query,\n map[string]interface{}{})\n if err != nil {\n return nil, err\n }\n var peopleNames []string\n for result.Next() {\n record := result.Record()\n\n names, ok := record.Get(\"names\")\n if !ok {return nil, fmt.Errorf(\"Couldn't get names\")}\n\n var n []string\n if names != nil {\n n, err = parseInterfaceToString(names)\n if err != nil {return nil, err}\n }\n\n peopleNames = n\n }\n return peopleNames, result.Err()\n })\n if err != nil {return nil, err}\n\n asserted, ok := peopleNames.([]string)\n if !ok {\n return nil, nil\n }\n\n\treturn asserted, nil\n}",
"func getNamesByPhoneNum(c *gin.Context) {\n\tvar (\n\t\terr error\n\t\tsuccess = false\n\t\terrMsg = \"\"\n\t\tnames []string\n\t)\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terrMsg = \"服务器错误\"\n\t\t\tlog.Printf(\"getNamesByPhone() err: %v\", err)\n\t\t}\n\n\t\tif errMsg != \"\" {\n\t\t\tlog.Printf(\"getNamesByPhone() errMsg: %v\", errMsg)\n\t\t}\n\n\t\tc.JSON(200, gin.H{\"success\": success, \"err_msg\": errMsg, \"names\": names})\n\t}()\n\n\tphoneNum := c.Param(\"phone_num\")\n\tif !ming.ValidPhoneNum(phoneNum) {\n\t\terrMsg = \"无效联系电话\"\n\t\treturn\n\t}\n\n\tif names, err = db.GetNamesByPhoneNum(phoneNum); err != nil {\n\t\treturn\n\t}\n\n\tsuccess = true\n\tlog.Printf(\"phone num: %v, names: %v\", phoneNum, names)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsForPersonName returns a list of eprint ids for a person's name (family, given) | func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {
conditions := []string{}
	if strings.Contains(family, "*") || strings.Contains(family, "%") {
conditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))
} else if family != "" {
conditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))
}
if strings.Contains(given, "*") || strings.Contains(given, "%") {
conditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))
} else if given != "" {
conditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))
}
stmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid
FROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)
WHERE %s
ORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,
field, field, strings.Join(conditions, " AND "), field, field)
return sqlQueryIntIDs(config, repoID, stmt, family, given)
} | [
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetAllPersonNames(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT CONCAT(%s_family, \"/\", %s_given) AS %s\nFROM eprint_%s\nWHERE (%s_family IS NOT NULL) OR (%s_given IS NOT NULL)\nGROUP BY %s_family, %s_given ORDER BY %s_family, %s_given`,\n\t\tfield, field, field,\n\t\tfield, field, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func GetAllPersonOrOrgIDs(config *Config, repoID string, field string) ([]string, error) {\n\tstmt := fmt.Sprintf(`SELECT %s_id FROM eprint_%s_id\nWHERE %s_id IS NOT NULL\nGROUP BY %s_id ORDER BY %s_id`, field, field, field, field, field)\n\treturn sqlQueryStringIDs(config, repoID, stmt)\n}",
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func (pc *PersonController) GetPersonsByName(w http.ResponseWriter, r *http.Request) {\n\n\t// get the name parameter\n\tvars := mux.Vars(r)\n\tsearchValue := vars[\"name\"]\n\tif searchValue == \"\" {\n\t\trespondWithError(w, http.StatusBadRequest, \"missing search criteria\")\n\t\treturn\n\t}\n\n\t// adjust operator and predicate if neccessary\n\top, predicate, err := buildStringQueryComponents(searchValue)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, fmt.Sprintf(`{\"GetPersonsByName\": \"%s\"}`, err))\n\t\treturn\n\t}\n\n\t// build GetParam\n\tp := sqac.GetParam{\n\t\tFieldName: \"name\",\n\t\tOperand: op,\n\t\tParamValue: predicate,\n\t\tNextOperator: \"\",\n\t}\n\tparams := []sqac.GetParam{}\n\tparams = append(params, p)\n\n\t// build base Href; common for each selected row\n\turlString := buildHrefBasic(r, true)\n\n\t// call the common Person GetSet method\n\tpersons, count, countReq, err := pc.getPersonSet(w, r, params)\n\tif persons != nil && countReq == false {\n\t\tfor i, l := range persons {\n\t\t\tpersons[i].Href = urlString + \"person/\" + strconv.FormatUint(uint64(l.ID), 10)\n\t\t}\n\t\trespondWithJSON(w, http.StatusOK, persons)\n\t\treturn\n\t}\n\n\tif countReq == true {\n\t\trespondWithCount(w, http.StatusOK, count)\n\t\treturn\n\t}\n\trespondWithJSON(w, http.StatusOK, \"[]\")\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func GetIdentifierFromNamedList(sequenceNode *yamlv3.Node) string {\n\tcounters := map[string]int{}\n\n\tfor _, mappingNode := range sequenceNode.Content {\n\t\tfor i := 0; i < len(mappingNode.Content); i += 2 {\n\t\t\tk := mappingNode.Content[i]\n\n\t\t\tif _, ok := counters[k.Value]; !ok {\n\t\t\t\tcounters[k.Value] = 0\n\t\t\t}\n\n\t\t\tcounters[k.Value]++\n\t\t}\n\t}\n\n\tlistLength := len(sequenceNode.Content)\n\tfor _, identifier := range []string{\"name\", \"key\", \"id\"} {\n\t\tif count, ok := counters[identifier]; ok && count == listLength {\n\t\t\treturn identifier\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func FindIds(name string) ([]int32, error) {\n\tvar pids []int32\n\tnps, err := Process()\n\tif err != nil {\n\t\treturn pids, err\n\t}\n\n\tname = strings.ToLower(name)\n\tfor i := 0; i < len(nps); i++ {\n\t\tpsname := strings.ToLower(nps[i].Name)\n\t\tabool := strings.Contains(psname, name)\n\t\tif abool {\n\t\t\tpids = append(pids, nps[i].Pid)\n\t\t}\n\t}\n\n\treturn pids, err\n}",
"func (e Employees) Names() []string {\n\tvar names []string\n\tfor name, _ := range e.salaries {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}",
"func FindNames() ([]string, error) {\n\tvar strArr []string\n\tpid, err := process.Pids()\n\n\tif err != nil {\n\t\treturn strArr, err\n\t}\n\n\tfor i := 0; i < len(pid); i++ {\n\t\tnps, err := process.NewProcess(pid[i])\n\t\tif err != nil {\n\t\t\treturn strArr, err\n\t\t}\n\n\t\tnames, err := nps.Name()\n\t\tif err != nil {\n\t\t\treturn strArr, err\n\t\t}\n\n\t\tstrArr = append(strArr, names)\n\t}\n\n\treturn strArr, err\n}",
"func (e *Service) getParticipants() []string {\n\tcandidates := make([]string, len(e.participants))\n\tfor i, candidate := range e.participants {\n\t\tcandidates[i] = candidate.ID\n\t}\n\treturn candidates\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func (_Token *TokenCaller) PepeNames(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) {\n\tvar (\n\t\tret0 = new([32]byte)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"pepeNames\", arg0)\n\treturn *ret0, err\n}",
"func (_Token *TokenFilterer) FilterPepeNamed(opts *bind.FilterOpts, pepeId []*big.Int) (*TokenPepeNamedIterator, error) {\n\n\tvar pepeIdRule []interface{}\n\tfor _, pepeIdItem := range pepeId {\n\t\tpepeIdRule = append(pepeIdRule, pepeIdItem)\n\t}\n\n\tlogs, sub, err := _Token.contract.FilterLogs(opts, \"PepeNamed\", pepeIdRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TokenPepeNamedIterator{contract: _Token.contract, event: \"PepeNamed\", logs: logs, sub: sub}, nil\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {\n\tcount := 0\n\tid := \"\"\n\tallPages, err := ListDetail(client, nil).AllPages()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tall, err := ExtractImages(allPages)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, f := range all {\n\t\tif f.Name == name {\n\t\t\tcount++\n\t\t\tid = f.ID\n\t\t}\n\t}\n\n\tswitch count {\n\tcase 0:\n\t\terr := &gophercloud.ErrResourceNotFound{}\n\t\terr.ResourceType = \"image\"\n\t\terr.Name = name\n\t\treturn \"\", err\n\tcase 1:\n\t\treturn id, nil\n\tdefault:\n\t\terr := &gophercloud.ErrMultipleResourcesFound{}\n\t\terr.ResourceType = \"image\"\n\t\terr.Name = name\n\t\terr.Count = count\n\t\treturn \"\", err\n\t}\n}",
"func (_Token *TokenCallerSession) PepeNames(arg0 *big.Int) ([32]byte, error) {\n\treturn _Token.Contract.PepeNames(&_Token.CallOpts, arg0)\n}",
"func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {\n\tcount := 0\n\tid := \"\"\n\tallPages, err := images.ListDetail(client, nil).AllPages()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tall, err := images.ExtractImages(allPages)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, f := range all {\n\t\tif f.Name == name {\n\t\t\tcount++\n\t\t\tid = f.ID\n\t\t}\n\t}\n\n\tswitch count {\n\tcase 0:\n\t\terr := &gophercloud.ErrResourceNotFound{}\n\t\terr.ResourceType = \"image\"\n\t\terr.Name = name\n\t\treturn \"\", err\n\tcase 1:\n\t\treturn id, nil\n\tdefault:\n\t\terr := &gophercloud.ErrMultipleResourcesFound{}\n\t\terr.ResourceType = \"image\"\n\t\terr.Name = name\n\t\terr.Count = count\n\t\treturn \"\", err\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetAllYears returns the publication years found in a repository | func GetAllYears(config *Config, repoID string) ([]int, error) {
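	// Only eprints with a "published" date contribute; years are returned newest first.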
stmt := fmt.Sprintf(`SELECT date_year FROM eprint WHERE date_type = "published" AND date_year IS NOT NULL GROUP BY date_year ORDER BY date_year DESC`)
return sqlQueryInts(config, repoID, stmt)
} | [
"func GetYears() map[int]Record {\n\tyears := make(map[int]Record)\n\tpages := page.GetAllPages()\n\tfor _, p := range pages {\n\t\tif p.Status == page.Published && p.Template == \"post\" {\n\t\t\tyear := p.PubDate.Year()\n\t\t\tr, present := years[year]\n\t\t\tif present {\n\t\t\t\tr.Count++\n\t\t\t} else {\n\t\t\t\tr = Record{Count: 1}\n\t\t\t}\n\t\t\tyears[year] = r\n\t\t}\n\t}\n\treturn years\n}",
"func (m *IndustryDataRoot) GetYears()([]YearTimePeriodDefinitionable) {\n val, err := m.GetBackingStore().Get(\"years\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]YearTimePeriodDefinitionable)\n }\n return nil\n}",
"func GetYearRatios(ctx iris.Context) {\n\tyear := ctx.URLParam(\"Year\")\n\tif year == \"\" {\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(jsonError{\"Ratios annuels : année manquante\"})\n\t\treturn\n\t}\n\tdb := ctx.Values().Get(\"db\").(*sql.DB)\n\ty, err := strconv.Atoi(year)\n\tif err != nil {\n\t\tctx.StatusCode(http.StatusBadRequest)\n\t\tctx.JSON(jsonError{\"Ratios annuels, format année : \" + err.Error()})\n\t\treturn\n\t}\n\tvar resp models.YearRatios\n\tif err = resp.GetAll(int64(y), db); err != nil {\n\t\tctx.StatusCode(http.StatusInternalServerError)\n\t\tctx.JSON(jsonError{\"Ratios annuels, requête : \" + err.Error()})\n\t\treturn\n\t}\n\tctx.StatusCode(http.StatusOK)\n\tctx.JSON(resp)\n}",
"func (p *Postgres) ListStudyYears() (res []store.StudyYear, err error) {\n\terr = pgh.Tx(p.connPool, pgh.TxerFunc(func(tx *pgx.Tx) error {\n\t\trows, err := tx.Query(`SELECT id, name FROM study_years`)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to query list all study years\")\n\t\t}\n\t\tvar s store.StudyYear\n\t\tfor rows.Next() {\n\t\t\ts = store.StudyYear{}\n\t\t\tif err = rows.Scan(&s.ID, &s.Name); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to scan study year\")\n\t\t\t}\n\t\t\tres = append(res, s)\n\t\t}\n\t\treturn nil\n\t}))\n\treturn res, err\n}",
"func (o BucketObjectLockConfigurationRuleDefaultRetentionPtrOutput) Years() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *BucketObjectLockConfigurationRuleDefaultRetention) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Years\n\t}).(pulumi.IntPtrOutput)\n}",
"func (m *IndustryDataRoot) SetYears(value []YearTimePeriodDefinitionable)() {\n err := m.GetBackingStore().Set(\"years\", value)\n if err != nil {\n panic(err)\n }\n}",
"func ListAllByYear(yearStr string) ([]byte, error) {\n\tyear, err := strconv.Atoi(yearStr)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Cannot convert year to int\")\n\t}\n\twinnersByYear := Winners{}\n\tfor _, winner := range winners.Winners {\n\t\tif winner.Year == year {\n\t\t\twinnersByYear.Winners = []Winner{winner}\n\t\t\tbreak\n\t\t}\n\t}\n\tjson, err := json.Marshal(winnersByYear)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Error marshalling JSON\")\n\t}\n\n\treturn json, nil\n}",
"func NewYears(y int64, d string) Years {\n\treturn Years{\n\t\tValue: y,\n\t\tDescription: d,\n\t}\n}",
"func Years(human int) int {\n\treturn 7 * human\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func YearOf(date time.Time) []time.Time {\n\treturn aYear(date)\n}",
"func (b *YearBudgetCredits) GetAll(year int, db *sql.DB) (err error) {\n\trows, err := db.Query(`SELECT extract(month FROM commission_date)::integer as month, \n\tchapter_id, primary_commitment, frozen_commitment, reserved_commitment\n\tFROM budget_credits \n\tWHERE (extract(day FROM commission_date), extract(month FROM commission_date)) in\n\t(SELECT max(extract(day FROM commission_date)), extract(month FROM commission_date)\n\t\tFROM budget_credits \n\t\tWHERE extract(year FROM commission_date)=$1 GROUP BY 2 ORDER BY 2,1)\n\t\t AND extract(year FROM commission_date)=$1\n\tORDER BY 1,2`, year)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar r YearBudgetCredit\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tif err = rows.Scan(&r.Month, &r.ChapterID, &r.PrimaryCommitment,\n\t\t\t&r.FrozenCommitment, &r.ReservedCommitment); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb.YearBudgetCredits = append(b.YearBudgetCredits, r)\n\t}\n\terr = rows.Err()\n\tif len(b.YearBudgetCredits) == 0 {\n\t\tb.YearBudgetCredits = []YearBudgetCredit{}\n\t}\n\treturn err\n}",
"func (m *UserMutation) YearIDs() (ids []int) {\n\tif id := m.year; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}",
"func TestGetYear(t *testing.T) {\n\trouter := configRouter()\n\tw := httptest.NewRecorder()\n\n\treq, _ := http.NewRequest(\"GET\", \"/year?id=2\", nil)\n\trouter.ServeHTTP(w, req)\n\n\tassert.Equal(t, 200, w.Code)\n\tassert.Equal(t, `{\"ErrorMSG\":\"\",\"values\":[23.93,50,62.48,0,50.5,0,87.1,0,0,0,29.39,3.6]}`, w.Body.String())\n}",
"func (pc *PersonController) GetPersonsByValidLicense(w http.ResponseWriter, r *http.Request) {\n\n\t// get the valid_license parameter\n\tvars := mux.Vars(r)\n\tsearchValue := vars[\"valid_license\"]\n\tif searchValue == \"\" {\n\t\trespondWithError(w, http.StatusBadRequest, \"missing search criteria\")\n\t\treturn\n\t}\n\n\t// adjust operator and predicate if neccessary\n\top, predicate, err := buildBoolQueryComponents(searchValue)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, fmt.Sprintf(`{\"GetPersonsByValidLicense\": \"%s\"}`, err))\n\t\treturn\n\t}\n\n\t// build GetParam\n\tp := sqac.GetParam{\n\t\tFieldName: \"valid_license\",\n\t\tOperand: op,\n\t\tParamValue: predicate,\n\t\tNextOperator: \"\",\n\t}\n\tparams := []sqac.GetParam{}\n\tparams = append(params, p)\n\n\t// build base Href; common for each selected row\n\turlString := buildHrefBasic(r, true)\n\n\t// call the common Person GetSet method\n\tpersons, count, countReq, err := pc.getPersonSet(w, r, params)\n\tif persons != nil && countReq == false {\n\t\tfor i, l := range persons {\n\t\t\tpersons[i].Href = urlString + \"person/\" + strconv.FormatUint(uint64(l.ID), 10)\n\t\t}\n\t\trespondWithJSON(w, http.StatusOK, persons)\n\t\treturn\n\t}\n\n\tif countReq == true {\n\t\trespondWithCount(w, http.StatusOK, count)\n\t\treturn\n\t}\n\trespondWithJSON(w, http.StatusOK, \"[]\")\n}",
"func (pc *PersonController) GetPersonsByAge(w http.ResponseWriter, r *http.Request) {\n\n\t// get the age parameter\n\tvars := mux.Vars(r)\n\tsearchValue := vars[\"age\"]\n\tif searchValue == \"\" {\n\t\trespondWithError(w, http.StatusBadRequest, \"missing search criteria\")\n\t\treturn\n\t}\n\n\t// adjust operator and predicate if neccessary\n\top, predicate, err := buildUIntQueryComponent(searchValue)\n\tif err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, fmt.Sprintf(`{\"GetPersonsByAge\": \"%s\"}`, err))\n\t\treturn\n\t}\n\n\t// build GetParam\n\tp := sqac.GetParam{\n\t\tFieldName: \"age\",\n\t\tOperand: op,\n\t\tParamValue: predicate,\n\t\tNextOperator: \"\",\n\t}\n\tparams := []sqac.GetParam{}\n\tparams = append(params, p)\n\n\t// build base Href; common for each selected row\n\turlString := buildHrefBasic(r, true)\n\n\t// call the common Person GetSet method\n\tpersons, count, countReq, err := pc.getPersonSet(w, r, params)\n\tif persons != nil && countReq == false {\n\t\tfor i, l := range persons {\n\t\t\tpersons[i].Href = urlString + \"person/\" + strconv.FormatUint(uint64(l.ID), 10)\n\t\t}\n\t\trespondWithJSON(w, http.StatusOK, persons)\n\t\treturn\n\t}\n\n\tif countReq == true {\n\t\trespondWithCount(w, http.StatusOK, count)\n\t\treturn\n\t}\n\trespondWithJSON(w, http.StatusOK, \"[]\")\n}",
"func HumanYearsInDogYears(n int) int {\n\treturn n * 7\n}",
"func (a *AnnualProgrammation) GetAll(year int, db *sql.DB) (err error) {\n\tqry := `WITH dates AS (\n\t\tSELECT DISTINCT date FROM financial_commitment WHERE DATE_PART('YEAR', date)=$1\n\t\tUNION\n\t\tSELECT DISTINCT c.date FROM programmings p, commissions c\n\t\t\tWHERE p.commission_id = c.id AND p.year = $1\n\t\tUNION\n\t\tSELECT DISTINCT commission_date AS date FROM pending_commitments\n\t\t\tWHERE DATE_PART('YEAR', commission_date) = $1)\n\tSELECT q.operation_number::varchar,q.name::varchar,q.chapter_code,\n\t\tq.step_name::varchar,q.category_name::varchar,q.date,q.programmings::bigint,\n\t\tq.total_programmings::bigint,q.state_ratio::double precision,\n\t\tq.commitment::bigint,q.pendings::bigint \n\tFROM \n\t\t(SELECT op.number AS operation_number,op.name,op.chapter_code,\n\t\t\top.step_name,op.category_name,op.date,pr.value AS programmings,\n\t\t\tpr.total_value AS total_programmings,pr.state_ratio,fc.value AS commitment, \n\t\t\tpe.proposed_value AS pendings \n\t\tFROM\n\t\t\t(SELECT op.id,op.name,op.number,bud.code as chapter_code,\n\t\t\t\tstep.name AS step_name,category.name AS category_name, dates.date\n\t\t\tFROM physical_op op\n\t\t\tCROSS JOIN dates\n\t\t\tLEFT OUTER JOIN \n\t\t\t\t(SELECT ba.id, bc.code FROM budget_action ba \n\t\t\t\t\tJOIN budget_program bp ON ba.program_id=bp.id\n\t\t\t\t\tJOIN budget_chapter bc ON bp.chapter_id=bc.id) bud\n\t\t\t\tON op.budget_action_id = bud.id\n\t\t\tLEFT OUTER JOIN step ON op.step_id = step.id\n\t\t\tLEFT OUTER JOIN category ON op.category_id = category.id) op\n\t\t\tLEFT JOIN\n\t\t\t\t(SELECT p.physical_op_id,SUM(p.value) AS value, \n\t\t\t\t\tSUM(p.total_value) AS total_value,p.state_ratio,c.date \n\t\t\t\tFROM programmings p,commissions c\n\t\t\t\tWHERE p.commission_id = c.id GROUP BY 1,4,5) pr\n\t\t\tON pr.date=op.date AND pr.physical_op_id=op.id\n\t\t\tLEFT JOIN \n\t\t\t\t(SELECT SUM(value) AS value,physical_op_id,financial_commitment.date,\n\t\t\t\t\tnull AS total_value,null as state_ratio \n\t\t\t\tFROM financial_commitment GROUP BY 2,3) fc\n\t\t\tON fc.physical_op_id = op.id AND fc.date=op.date\n\t\t\tLEFT JOIN \n\t\t\t\t(SELECT SUM(proposed_value) AS proposed_value,physical_op_id, \n\t\t\t\t\tcommission_date AS date,null AS total_value,null as state_ratio\n\t\t\t\tFROM pending_commitments GROUP BY 2,3) pe\n\t\t\tON pe.physical_op_id = op.id AND pe.date=op.date\n\t\t\tWHERE pr.value NOTNULL OR fc.value NOTNULL OR pe.proposed_value NOTNULL\n\t\t\t\n\t\t\tUNION ALL\n\t\t\t\n\t\t\tSELECT NULL as operation_number,fc.name AS name,\n\t\t\t\tfc.chapter::integer as chapter_code,NULL as step_name,\n\t\t\t\tNULL as category_name,fc.date,NULL AS programmings,\n\t\t\t\tNULL as total_programmings,NULL as state_ratio,fc.value AS commitment,\n\t\t\t\tNULL AS pendings\n\t\t\tFROM financial_commitment fc\n\t\t\tWHERE fc.physical_op_id ISNULL AND DATE_PART('YEAR',fc.Date)= $1\n\t\t\t\n\t\t\tUNION ALL\n\t\t\n\t\t\tSELECT NULL as operation_number,pe.name AS name,\n\t\t\t\tpe.chapter::integer as chapter_code,NULL as step_name,\n\t\t\t\tNULL as category_name,pe.commission_date AS date,NULL AS programmings,\n\t\t\t\tNULL as total_programmings,NULL as state_ratio,NULL AS commitment,\n\t\t\t\tpe.proposed_value AS pendings\n\t\t\tFROM pending_commitments pe\n\t\t\tWHERE pe.physical_op_id ISNULL AND DATE_PART('YEAR',pe.commission_date)=$1\n\t\t\tORDER BY 3, 1) q`\n\trows, err := db.Query(qry, year)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar r AnnualProgLine\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tif err = rows.Scan(&r.OperationNumber, 
&r.Name, &r.ChapterCode, &r.StepName,\n\t\t\t&r.CategoryName, &r.Date, &r.Programmings, &r.TotalProgrammings,\n\t\t\t&r.StateRatio, &r.Commitment, &r.Pendings); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ta.AnnualProgrammation = append(a.AnnualProgrammation, r)\n\t}\n\tif len(a.AnnualProgrammation) == 0 {\n\t\ta.AnnualProgrammation = []AnnualProgLine{}\n\t}\n\terr = rows.Err()\n\treturn err\n}",
"func HasYear() predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(YearTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, YearTable, YearColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetEPrintIDsForYear returns a list of published eprint IDs for a given year. | func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {
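	// Matches only eprints whose "published" date falls in the given year, newest first.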
stmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = "published" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)
return sqlQueryIntIDs(config, repoID, stmt, year)
} | [
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func GetAllYears(config *Config, repoID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT date_year FROM eprint WHERE date_type = \"published\" AND date_year IS NOT NULL GROUP BY date_year ORDER BY date_year DESC`)\n\treturn sqlQueryInts(config, repoID, stmt)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func (m *ActivitiesMutation) AcademicyearIDs() (ids []int) {\n\tif id := m.academicyear; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func (m *UserMutation) YearIDs() (ids []int) {\n\tif id := m.year; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}",
"func (db DB) GetEvents(year int) ([]event.BasicEvent, error) {\n\tif events, ok := db.Events[year]; ok {\n\t\treturn events, nil\n\t}\n\treturn []event.BasicEvent{}, ErrNoSuchYear\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func ImportEPrints(config *Config, repoID string, ds *DataSource, eprints *EPrints) ([]int, error) {\n\tvar importErrors error\n\tids := []int{}\n\n\tif config.Connections == nil {\n\t\treturn nil, fmt.Errorf(`no databases are not configured`)\n\t}\n\t_, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(`%s database connection not configured`, repoID)\n\t}\n\n\t// Check to make sure updates are allowed if non-Zero\n\t// eprint ids present.\n\tfor _, eprint := range eprints.EPrint {\n\t\tif eprint.EPrintID != 0 {\n\t\t\treturn nil, fmt.Errorf(\"create failed eprint id %d in %s\", eprint.EPrintID, repoID)\n\t\t}\n\t\tif eprint.Collection == \"\" && ds.DefaultCollection != \"\" {\n\t\t\teprint.Collection = DefaultCollection\n\t\t}\n\t\tif eprint.IDNumber == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.IDNumber = GenerateIDNumber(eprint)\n\t\t}\n\t\tif eprint.OfficialURL == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.OfficialURL = GenerateOfficialURL(eprint)\n\t\t}\n\t\tif eprint.Rights == \"\" && ds.DefaultRights != \"\" {\n\t\t\teprint.Rights = ds.DefaultRights\n\t\t}\n\t\tif eprint.Refereed == \"\" && eprint.Type == \"article\" &&\n\t\t\tds.DefaultRefereed != \"\" {\n\t\t\teprint.Refereed = ds.DefaultRefereed\n\t\t}\n\t\tif eprint.EPrintStatus == \"\" && ds.DefaultStatus != \"\" {\n\t\t\teprint.EPrintStatus = ds.DefaultStatus\n\t\t}\n\t\tif eprint.Abstract != \"\" && ds.StripTags {\n\t\t\tif cleaner.HasEncodedElements([]byte(eprint.Abstract)) {\n\t\t\t\teprint.Abstract = string(cleaner.StripTags([]byte(eprint.Abstract)))\n\t\t\t}\n\t\t}\n\t}\n\tfor _, eprint := range eprints.EPrint {\n\t\tid, err := SQLCreateEPrint(config, repoID, ds, eprint)\n\t\tif err != nil {\n\t\t\tif importErrors == nil {\n\t\t\t\timportErrors = err\n\t\t\t} else {\n\t\t\t\timportErrors = fmt.Errorf(\"%s; %s\", importErrors, err)\n\t\t\t}\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\treturn ids, importErrors\n}",
"func YearInList(time time.Time, years []int) bool {\n\tfor _, year := range years {\n\t\tif time.Year() == year {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func GetYears() map[int]Record {\n\tyears := make(map[int]Record)\n\tpages := page.GetAllPages()\n\tfor _, p := range pages {\n\t\tif p.Status == page.Published && p.Template == \"post\" {\n\t\t\tyear := p.PubDate.Year()\n\t\t\tr, present := years[year]\n\t\t\tif present {\n\t\t\t\tr.Count++\n\t\t\t} else {\n\t\t\t\tr = Record{Count: 1}\n\t\t\t}\n\t\t\tyears[year] = r\n\t\t}\n\t}\n\treturn years\n}",
"func (program Program) GetYear() string {\n\treturn program.year\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Column mapping for tables. colExpr takes a column name, an ifNull bool and a default value. If the "ifNull" bool is true then the form expressed is `IFNULL(%s, %s) AS %s`; otherwise just the column name is returned. | func colExpr(name string, ifNull bool, value string) string {
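	// e.g. colExpr("title", true, `""`) yields `IFNULL(title, "") AS title`,
	// while colExpr("title", false, `""`) yields just `title`.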
if ifNull {
return fmt.Sprintf(`IFNULL(%s, %s) AS %s`, name, value, name)
}
return name
} | [
"func doltColToSqlCol(tableName string, col schema.Column) *sql.Column {\n\treturn &sql.Column{\n\t\tName: col.Name,\n\t\tType: nomsTypeToSqlType(col.Kind),\n\t\tDefault: nil,\n\t\tNullable: col.IsNullable(),\n\t\tSource: tableName,\n\t}\n}",
"func GetColName(node Expr) string {\n\tif n, ok := node.(*ColName); ok {\n\t\treturn string(n.Name)\n\t}\n\treturn \"\"\n}",
"func TableCol(table, column string) *pb.Expr {\n\treturn SchemaTableCol(\"\", table, column)\n}",
"func SqlColToDoltCol(tag uint64, isPk bool, col *sql.Column) schema.Column {\n\t// TODO: nullness constraint\n\treturn schema.NewColumn(col.Name, tag, SqlTypeToNomsKind(col.Type), isPk)\n}",
"func Col(column string) *pb.Expr {\n\treturn SchemaTableCol(\"\", \"\", column)\n}",
"func SchemaTableCol(schema, table, column string) *pb.Expr {\n\treturn &pb.Expr{\n\t\tExpr: &pb.Expr_Col{\n\t\t\tCol: &pb.Col{\n\t\t\t\tSchema: schema,\n\t\t\t\tTable: table,\n\t\t\t\tColumn: column,\n\t\t\t},\n\t\t},\n\t}\n}",
"func ToDoltCol(tag uint64, col *sql.Column) (schema.Column, error) {\n\tvar constraints []schema.ColConstraint\n\tif !col.Nullable || col.PrimaryKey {\n\t\tconstraints = append(constraints, schema.NotNullConstraint{})\n\t}\n\ttypeInfo, err := typeinfo.FromSqlType(col.Type)\n\tif err != nil {\n\t\treturn schema.Column{}, err\n\t}\n\n\treturn schema.NewColumnWithTypeInfo(col.Name, tag, typeInfo, col.PrimaryKey, col.Default.String(), col.AutoIncrement, col.Comment, constraints...)\n}",
"func (st *symtab) NewResultColumn(expr *sqlparser.NonStarExpr, rb *route) *resultColumn {\n\trc := &resultColumn{\n\t\talias: expr.As,\n\t}\n\tif col, ok := expr.Expr.(*sqlparser.ColName); ok {\n\t\t// If no alias was specified, then the base name\n\t\t// of the column becomes the alias.\n\t\tif rc.alias.IsEmpty() {\n\t\t\trc.alias = col.Name\n\t\t}\n\t\t// If it's a col it should already have metadata.\n\t\trc.column = col.Metadata.(*column)\n\t} else {\n\t\t// We don't generate an alias if the expression is non-trivial.\n\t\t// Just to be safe, generate an anonymous column for the expression.\n\t\trc.column = &column{\n\t\t\troute: rb,\n\t\t}\n\t}\n\treturn rc\n}",
"func GetColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo) (types.Datum, error) {\n\tdefaultValue := col.GetDefaultValue()\n\tif !col.DefaultIsExpr {\n\t\treturn getColDefaultValue(ctx, col, defaultValue, nil)\n\t}\n\treturn getColDefaultExprValue(ctx, col, defaultValue.(string))\n}",
"func TableCol() *HTMLTableCol {\n\te := &HTMLTableCol{}\n\te.a = make(map[string]interface{})\n\te.tagName = \"tablecol\"\n\treturn e\n}",
"func (sqlbldr *Builder) AddParamForColumnIfDefined( aParamKey string, aColumnName string ) *Builder {\n\tif sqlbldr.isDataKeyDefined(aParamKey) {\n\t\tsqlbldr.getParamValueFromDataSource(aParamKey)\n\t\tsqlbldr.addingParam(aColumnName, aParamKey)\n\t}\n\treturn sqlbldr\n}",
"func consuructColumnAnnotation(c sqlparser.ColumnType, idx Index) string {\n\tstartGorm := \"`gorm:`\"\n\tresponse := startGorm\n\tif c.NotNull {\n\t\tresponse += \"NOT NULL;\"\n\t}\n\tif c.Autoincrement {\n\t\tresponse += \"AUTOINCREMENT;\"\n\t}\n\tif idx.PrimaryKey {\n\t\tresponse += \"PRIMARY_KEY;\"\n\t}\n\tif c.Default != nil {\n\t\tresponse += fmt.Sprintf(\"DEFAULT:`%s`\", string(c.Default.Val))\n\t}\n\tif c.Length != nil && c.Default != nil {\n\t\tresponse += fmt.Sprintf(`SIZE:\"%s\"`, string(c.Default.Val))\n\t}\n\tif response == startGorm {\n\t\treturn \"\"\n\t}\n\treturn response + \"`\"\n}",
"func NewFieldFromColumn(col Column) astutils.FieldMeta {\n\ttag := \"dd:\"\n\tvar feats []string\n\tif col.Pk {\n\t\tfeats = append(feats, \"pk\")\n\t}\n\tif col.Autoincrement {\n\t\tfeats = append(feats, \"auto\")\n\t}\n\tgoType := toGoType(col.Type, col.Nullable)\n\tif col.Nullable && !strings.HasPrefix(goType, \"*\") {\n\t\tfeats = append(feats, \"null\")\n\t}\n\tif stringutils.IsNotEmpty(string(col.Type)) {\n\t\tfeats = append(feats, fmt.Sprintf(\"type:%s\", string(col.Type)))\n\t}\n\tif stringutils.IsNotEmpty(col.Default) {\n\t\tval := col.Default\n\t\tre := regexp.MustCompile(`^\\(.+\\)$`)\n\t\tvar defaultClause string\n\t\tif strings.ToUpper(val) == \"CURRENT_TIMESTAMP\" || re.MatchString(val) {\n\t\t\tdefaultClause = fmt.Sprintf(\"default:%s\", val)\n\t\t} else {\n\t\t\tdefaultClause = fmt.Sprintf(\"default:'%s'\", val)\n\t\t}\n\t\tfeats = append(feats, defaultClause)\n\t}\n\tif stringutils.IsNotEmpty(string(col.Extra)) {\n\t\tfeats = append(feats, fmt.Sprintf(\"extra:%s\", string(col.Extra)))\n\t}\n\tfor _, idx := range col.Indexes {\n\t\tvar indexClause string\n\t\tif idx.Name == \"PRIMARY\" {\n\t\t\tcontinue\n\t\t}\n\t\tif idx.Unique {\n\t\t\tindexClause = \"unique:\"\n\t\t} else {\n\t\t\tindexClause = \"index:\"\n\t\t}\n\t\tindexClause += fmt.Sprintf(\"%s,%d,%s\", idx.Name, idx.Order, string(idx.Sort))\n\t\tfeats = append(feats, indexClause)\n\t}\n\n\treturn astutils.FieldMeta{\n\t\tName: strcase.ToCamel(col.Name),\n\t\tType: goType,\n\t\tTag: fmt.Sprintf(`%s\"%s\"`, tag, strings.Join(feats, \";\")),\n\t}\n}",
"func NullRangeColumnExpr(typ Type) RangeColumnExpr {\n\treturn RangeColumnExpr{\n\t\tLowerBound: BelowNull{},\n\t\tUpperBound: AboveNull{},\n\t\tTyp: typ,\n\t}\n}",
"func (s *DbService) getColMapping(col string) ColumnMapping {\n\tmapping, exists := s.config.Mapping[col]\n\tif !exists {\n\t\tmapping = ColumnMapping{Type: s.config.DefaultColType, Index: false}\n\t}\n\n\t// set required default fields if not set\n\tif mapping.Type == \"\" {\n\t\tmapping.Type = s.config.DefaultColType\n\t}\n\n\treturn mapping\n}",
"func (t *Table) ColumnName(field string) (string, error) {\n\tif t.columnsByField == nil {\n\t\tt.columnsByField = make(map[string]Column)\n\t}\n\tif column, ok := t.columnsByField[field]; ok {\n\t\treturn column.Name(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"no mapping for field %q\", field)\n}",
"func (b *Builder) shouldCreateDefaultColumn(texpr tree.TypedExpr) bool {\n\tif texpr.ResolvedType() == types.EmptyTuple {\n\t\t// This is only to support crdb_internal.unary_table().\n\t\treturn false\n\t}\n\n\t// We need to create a default column with a default name when\n\t// the function return type doesn't declare any return labels.\n\treturn len(texpr.ResolvedType().TupleLabels()) == 0\n}",
"func (psql *PostgresDialect) ColumnDefinition(col Column) string {\n\n\t//TODO convert to string templates if practical\n\n\tddl := col.Name\n\tddl += \" \"\n\tddl += psql.columnTypeDefinition(col)\n\n\treturn ddl\n}",
"func AddColumn(dbMap *gorp.DbMap, db string, table string, columnToAdd string,\n\tdataSpec string, colAfter string, defaultQry string) {\n\ts, err := dbMap.SelectStr(\"SELECT column_name FROM \" +\n\t\t\"information_schema.columns WHERE table_schema = '\" + db +\n\t\t\"' AND table_name = '\" + table + \"' AND column_name = '\" +\n\t\tcolumnToAdd + \"'\")\n\tcheckErr(err, \"checking whether column\"+columnToAdd+\" exists failed\")\n\tif s == \"\" {\n\t\t// TODO would be nice to use parameter binding here but gorp seems to\n\t\t// only provide that for select queries\n\t\t_, err = dbMap.Exec(\"ALTER TABLE `\" + table + \"` ADD COLUMN `\" +\n\t\t\tcolumnToAdd + \"` \" + dataSpec + \" AFTER `\" + colAfter + \"`\")\n\t\tcheckErr(err, \"adding new column \"+columnToAdd+\" failed\")\n\t\tif defaultQry != \"\" {\n\t\t\t_, err = dbMap.Exec(defaultQry)\n\t\t\tcheckErr(err, defaultQry+\" failed\")\n\t\t}\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
eprintToColumnsAndValues for a given EPrint struct generates a list of column names to query along with a receiving values array. Returns a list of column names (with null handling and aliases) and values. The bool ifNull controls the type of expression used for the column. | func eprintToColumnsAndValues(eprint *EPrint, columnsIn []string, ifNull bool) ([]string, []interface{}) {
columnsOut := []string{}
values := []interface{}{}
for i, key := range columnsIn {
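		// Each case below appends a pointer to the matching EPrint field (the scan
		// destination) plus the column expression; when ifNull is true the column is
		// wrapped as IFNULL(col, default) AS col so NULL values scan as zero values.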
switch key {
case "eprintid":
values = append(values, &eprint.EPrintID)
columnsOut = append(columnsOut, key)
case "rev_number":
values = append(values, &eprint.RevNumber)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "eprint_status":
values = append(values, &eprint.EPrintStatus)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "userid":
values = append(values, &eprint.UserID)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "dir":
values = append(values, &eprint.Dir)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "datestamp_year":
values = append(values, &eprint.DatestampYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "datestamp_month":
values = append(values, &eprint.DatestampMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "datestamp_day":
values = append(values, &eprint.DatestampDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "datestamp_hour":
values = append(values, &eprint.DatestampHour)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "datestamp_minute":
values = append(values, &eprint.DatestampMinute)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "datestamp_second":
values = append(values, &eprint.DatestampSecond)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "lastmod_year":
values = append(values, &eprint.LastModifiedYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "lastmod_month":
values = append(values, &eprint.LastModifiedMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "lastmod_day":
values = append(values, &eprint.LastModifiedDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "lastmod_hour":
values = append(values, &eprint.LastModifiedHour)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "lastmod_minute":
values = append(values, &eprint.LastModifiedMinute)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "lastmod_second":
values = append(values, &eprint.LastModifiedSecond)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "status_changed_year":
values = append(values, &eprint.StatusChangedYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "status_changed_month":
values = append(values, &eprint.StatusChangedMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "status_changed_day":
values = append(values, &eprint.StatusChangedDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "status_changed_hour":
values = append(values, &eprint.StatusChangedHour)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "status_changed_minute":
values = append(values, &eprint.StatusChangedMinute)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "status_changed_second":
values = append(values, &eprint.StatusChangedSecond)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "type":
values = append(values, &eprint.Type)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "metadata_visibility":
values = append(values, &eprint.MetadataVisibility)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "title":
values = append(values, &eprint.Title)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "ispublished":
values = append(values, &eprint.IsPublished)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "full_text_status":
values = append(values, &eprint.FullTextStatus)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "keywords":
values = append(values, &eprint.Keywords)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "note":
values = append(values, &eprint.Note)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "abstract":
values = append(values, &eprint.Abstract)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "date_year":
values = append(values, &eprint.DateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "date_month":
values = append(values, &eprint.DateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "date_day":
values = append(values, &eprint.DateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "date_type":
values = append(values, &eprint.DateType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "series":
values = append(values, &eprint.Series)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "volume":
values = append(values, &eprint.Volume)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "number":
values = append(values, &eprint.Number)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "publication":
values = append(values, &eprint.Publication)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "publisher":
values = append(values, &eprint.Publisher)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "place_of_pub":
values = append(values, &eprint.PlaceOfPub)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "edition":
values = append(values, &eprint.Edition)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "pagerange":
values = append(values, &eprint.PageRange)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "pages":
values = append(values, &eprint.Pages)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "event_type":
values = append(values, &eprint.EventType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "event_title":
values = append(values, &eprint.EventTitle)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "event_location":
values = append(values, &eprint.EventLocation)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "event_dates":
values = append(values, &eprint.EventDates)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "id_number":
values = append(values, &eprint.IDNumber)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "refereed":
values = append(values, &eprint.Refereed)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "isbn":
values = append(values, &eprint.ISBN)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "issn":
values = append(values, &eprint.ISSN)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "book_title":
values = append(values, &eprint.BookTitle)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "official_url":
values = append(values, &eprint.OfficialURL)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "alt_url":
values = append(values, &eprint.AltURL)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "rights":
values = append(values, &eprint.Rights)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "collection":
values = append(values, &eprint.Collection)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "reviewer":
values = append(values, &eprint.Reviewer)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "official_cit":
values = append(values, &eprint.OfficialCitation)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "monograph_type":
values = append(values, &eprint.MonographType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "suggestions":
values = append(values, &eprint.Suggestions)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "pres_type":
values = append(values, &eprint.PresType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "succeeds":
values = append(values, &eprint.Succeeds)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "commentary":
values = append(values, &eprint.Commentary)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "contact_email":
values = append(values, &eprint.ContactEMail)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "fileinfo":
values = append(values, &eprint.FileInfo)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "latitude":
values = append(values, &eprint.Latitude)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0.0`))
case "longitude":
values = append(values, &eprint.Longitude)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0.0`))
case "department":
values = append(values, &eprint.Department)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "output_media":
values = append(values, &eprint.OutputMedia)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "num_pieces":
values = append(values, &eprint.NumPieces)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "composition_type":
values = append(values, &eprint.CompositionType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "data_type":
values = append(values, &eprint.DataType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "pedagogic_type":
values = append(values, new(string))
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "learning_level":
values = append(values, &eprint.LearningLevelText)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "completion_time":
values = append(values, &eprint.CompletionTime)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "task_purpose":
values = append(values, &eprint.TaskPurpose)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "doi":
values = append(values, &eprint.DOI)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "pmc_id":
values = append(values, &eprint.PMCID)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "pmid":
values = append(values, &eprint.PMID)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "parent_url":
values = append(values, &eprint.ParentURL)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "toc":
values = append(values, &eprint.TOC)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "interviewer":
values = append(values, &eprint.Interviewer)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "interviewdate":
values = append(values, &eprint.InterviewDate)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "nonsubj_keywords":
values = append(values, &eprint.NonSubjKeywords)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "season":
values = append(values, &eprint.Season)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "classification_code":
values = append(values, &eprint.ClassificationCode)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "sword_depositor":
values = append(values, &eprint.SwordDepositor)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "sword_depository":
values = append(values, &eprint.SwordDepository)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "sword_slug":
values = append(values, &eprint.SwordSlug)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "importid":
values = append(values, &eprint.ImportID)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "patent_applicant":
values = append(values, &eprint.PatentApplicant)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "patent_number":
values = append(values, &eprint.PatentNumber)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "institution":
values = append(values, &eprint.Institution)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "thesis_type":
values = append(values, &eprint.ThesisType)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "thesis_degree":
values = append(values, &eprint.ThesisDegree)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "thesis_degree_grantor":
values = append(values, &eprint.ThesisDegreeGrantor)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "thesis_degree_date_year":
values = append(values, &eprint.ThesisDegreeDateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_degree_date_month":
values = append(values, &eprint.ThesisDegreeDateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_degree_date_day":
values = append(values, &eprint.ThesisDegreeDateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_submitted_date_year":
values = append(values, &eprint.ThesisSubmittedDateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_submitted_date_month":
values = append(values, &eprint.ThesisSubmittedDateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_submitted_date_day":
values = append(values, &eprint.ThesisSubmittedDateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_defense_date":
values = append(values, &eprint.ThesisDefenseDate)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "thesis_defense_date_year":
values = append(values, &eprint.ThesisDefenseDateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_defense_date_month":
values = append(values, &eprint.ThesisDefenseDateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_defense_date_day":
values = append(values, &eprint.ThesisDefenseDateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_approved_date_year":
values = append(values, &eprint.ThesisApprovedDateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_approved_date_month":
values = append(values, &eprint.ThesisApprovedDateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_approved_date_day":
values = append(values, &eprint.ThesisApprovedDateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_public_date_year":
values = append(values, &eprint.ThesisPublicDateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_public_date_month":
values = append(values, &eprint.ThesisPublicDateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_public_date_day":
values = append(values, &eprint.ThesisPublicDateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_author_email":
values = append(values, &eprint.ThesisAuthorEMail)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "hide_thesis_author_email":
values = append(values, &eprint.HideThesisAuthorEMail)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "gradofc_approval_date":
values = append(values, &eprint.GradOfficeApprovalDate)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "gradofc_approval_date_year":
values = append(values, &eprint.GradOfficeApprovalDateYear)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "gradofc_approval_date_month":
values = append(values, &eprint.GradOfficeApprovalDateMonth)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "gradofc_approval_date_day":
values = append(values, &eprint.GradOfficeApprovalDateDay)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "thesis_awards":
values = append(values, &eprint.ThesisAwards)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "review_status":
values = append(values, &eprint.ReviewStatus)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "copyright_statement":
values = append(values, &eprint.CopyrightStatement)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "source":
values = append(values, &eprint.Source)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "replacedby":
values = append(values, &eprint.ReplacedBy)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "item_issues_count":
values = append(values, &eprint.ItemIssuesCount)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "errata":
values = append(values, &eprint.ErrataText)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "coverage_dates":
values = append(values, &eprint.CoverageDates)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "edit_lock_user":
values = append(values, &eprint.EditLockUser)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "edit_lock_since":
values = append(values, &eprint.EditLockSince)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
case "edit_lock_until":
values = append(values, &eprint.EditLockUntil)
columnsOut = append(columnsOut, colExpr(key, ifNull, `0`))
		// The following values represent sub tables and are processed separately.
case "patent_classification":
values = append(values, &eprint.PatentClassificationText)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "language":
values = append(values, &eprint.Language)
columnsOut = append(columnsOut, colExpr(key, ifNull, `""`))
case "referencetext":
			// NOTE: This is just an ignored column. CaltechES has this as a VARCHAR column in the eprint
			// table and all values are NULL. None of our other repositories have this column. It was possibly
			// added in error, or it is a legacy EPrint column that was migrated from an older version but never used.
default:
// Handle case where we have value that is unmapped or not available in EPrint struct
log.Printf("could not map %q (col. %d, eprintid %d) into EPrint struct", key, i, eprint.EPrintID)
}
}
return columnsOut, values
} | [
"func (a *Generator) colvals(fields []*internal.Field, ignoreNames ...interface{}) string {\n\tignore := ignoreFromMultiTypes(ignoreNames)\n\n\tstr := \"\"\n\ti := 0\n\tfor _, f := range fields {\n\t\tif ignore[f.Name] {\n\t\t\tcontinue\n\t\t}\n\n\t\tif i != 0 {\n\t\t\tstr = str + \", \"\n\t\t}\n\t\tstr = str + a.loader.NthParam(i)\n\t\ti++\n\t}\n\n\treturn str\n}",
"func Columns(s interface{}) string {\n\tv := reflect.ValueOf(s)\n\tfields := getFieldInfo(v.Type())\n\n\tnames := make([]string, 0, len(fields))\n\tfor f := range fields {\n\t\tnames = append(names, f)\n\t}\n\n\tsort.Strings(names)\n\treturn strings.Join(names, \", \")\n}",
"func ColumnAsArray(columns []string, values ...interface{}) (results [][]interface{}) {\n\tvar indirectValue reflect.Value\n\tfor _, value := range values {\n\t\tif v, ok := value.(reflect.Value); ok {\n\t\t\tindirectValue = v\n\t\t} else {\n\t\t\tindirectValue = reflect.ValueOf(value)\n\t\t}\n\t\tif indirectValue.Kind() == reflect.Ptr {\n\t\t\tindirectValue = indirectValue.Elem()\n\t\t}\n\n\t\tswitch indirectValue.Kind() {\n\t\tcase reflect.Slice:\n\t\t\tfor i := 0; i < indirectValue.Len(); i++ {\n\t\t\t\tvar result []interface{}\n\t\t\t\tobject := indirectValue.Index(i)\n\t\t\t\tif object.Kind() == reflect.Ptr {\n\t\t\t\t\tobject = object.Elem()\n\t\t\t\t}\n\t\t\t\tvar hasValue = false\n\t\t\t\tfor _, column := range columns {\n\t\t\t\t\tfield := object.FieldByName(column)\n\t\t\t\t\tif hasValue || !IsBlank(field) {\n\t\t\t\t\t\thasValue = true\n\t\t\t\t\t}\n\t\t\t\t\tresult = append(result, field.Interface())\n\t\t\t\t}\n\n\t\t\t\tif hasValue {\n\t\t\t\t\tresults = append(results, result)\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tvar result []interface{}\n\t\t\tvar hasValue = false\n\t\t\tfor _, column := range columns {\n\t\t\t\tfield := indirectValue.FieldByName(column)\n\t\t\t\tif hasValue || !IsBlank(field) {\n\t\t\t\t\thasValue = true\n\t\t\t\t}\n\t\t\t\tresult = append(result, field.Interface())\n\t\t\t}\n\n\t\t\tif hasValue {\n\t\t\t\tresults = append(results, result)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func Columns(val interface{}) Cols {\n\tif mp, ok := val.(MapProxyer); ok {\n\t\tval = mp.MapProxy()\n\t}\n\n\tt := reflect.TypeOf(val)\n\n\tif t.Kind() == reflect.Map {\n\t\treturn mapCols(reflect.ValueOf(val))\n\t}\n\n\treturn structCols(t)\n}",
"func BuildUpdateColumns(val interface{}) (string, []interface{}) {\n\tt := reflect.ValueOf(val).Elem()\n\ttypeOfT := t.Type()\n\tvar setFields []string\n\tvar values []interface{}\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tvar tag reflect.StructTag = typeOfT.Field(i).Tag\n\t\tvar field reflect.Value = t.Field(i)\n\n\t\tvar columnName *string = GetColumnNameFromTag(&tag)\n\t\tif columnName == nil {\n\t\t\tcontinue\n\t\t}\n\t\tsetFields = append(setFields, fmt.Sprintf(\"%s = $%d\", *columnName, i + 1) )\n\t\tvalues = append(values, field.Interface() )\n\t}\n\treturn strings.Join(setFields,\", \"), values\n}",
"func colExpr(name string, ifNull bool, value string) string {\n\tif ifNull {\n\t\treturn fmt.Sprintf(`IFNULL(%s, %s) AS %s`, name, value, name)\n\t}\n\treturn name\n}",
"func GetColumnsArr(val2test map[interface{}]interface{}) (list []interface{}) {\n\tfor v, t := range val2test {\n\t\tlist = appendToSliceIfNotNil(t, v, list)\n\t}\n\treturn\n}",
"func (br *blockResult) getColumnValues(idx int) []string {\n\tc := &br.cs[idx]\n\tif c.values != nil {\n\t\treturn c.values\n\t}\n\n\tbuf := br.buf\n\tvaluesBuf := br.valuesBuf\n\tvaluesBufLen := len(valuesBuf)\n\n\tif c.isConst {\n\t\tv := c.encodedValues[0]\n\t\tfor range br.timestamps {\n\t\t\tvaluesBuf = append(valuesBuf, v)\n\t\t}\n\t\tc.values = valuesBuf[valuesBufLen:]\n\t\tbr.valuesBuf = valuesBuf\n\t\treturn c.values\n\t}\n\tif c.isTime {\n\t\tfor _, timestamp := range br.timestamps {\n\t\t\tt := time.Unix(0, timestamp).UTC()\n\t\t\tbufLen := len(buf)\n\t\t\tbuf = t.AppendFormat(buf, time.RFC3339Nano)\n\t\t\ts := bytesutil.ToUnsafeString(buf[bufLen:])\n\t\t\tvaluesBuf = append(valuesBuf, s)\n\t\t}\n\t\tc.values = valuesBuf[valuesBufLen:]\n\t\tbr.buf = buf\n\t\tbr.valuesBuf = valuesBuf\n\t\treturn c.values\n\t}\n\n\tappendValue := func(v string) {\n\t\tbufLen := len(buf)\n\t\tbuf = append(buf, v...)\n\t\ts := bytesutil.ToUnsafeString(buf[bufLen:])\n\t\tvaluesBuf = append(valuesBuf, s)\n\t}\n\n\tswitch c.valueType {\n\tcase valueTypeString:\n\t\tc.values = c.encodedValues\n\t\treturn c.values\n\tcase valueTypeDict:\n\t\tdictValues := c.dictValues\n\t\tfor _, v := range c.encodedValues {\n\t\t\tdictIdx := v[0]\n\t\t\tappendValue(dictValues[dictIdx])\n\t\t}\n\tcase valueTypeUint8:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tn := uint64(v[0])\n\t\t\tbb.B = strconv.AppendUint(bb.B[:0], n, 10)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tcase valueTypeUint16:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tb := bytesutil.ToUnsafeBytes(v)\n\t\t\tn := uint64(encoding.UnmarshalUint16(b))\n\t\t\tbb.B = strconv.AppendUint(bb.B[:0], n, 10)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tcase valueTypeUint32:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tb := bytesutil.ToUnsafeBytes(v)\n\t\t\tn := uint64(encoding.UnmarshalUint32(b))\n\t\t\tbb.B = strconv.AppendUint(bb.B[:0], n, 10)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tcase valueTypeUint64:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tb := bytesutil.ToUnsafeBytes(v)\n\t\t\tn := encoding.UnmarshalUint64(b)\n\t\t\tbb.B = strconv.AppendUint(bb.B[:0], n, 10)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tcase valueTypeFloat64:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tbb.B = toFloat64String(bb.B[:0], v)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tcase valueTypeIPv4:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tbb.B = toIPv4String(bb.B[:0], v)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tcase valueTypeTimestampISO8601:\n\t\tbb := bbPool.Get()\n\t\tfor _, v := range c.encodedValues {\n\t\t\tbb.B = toTimestampISO8601String(bb.B[:0], v)\n\t\t\tappendValue(bytesutil.ToUnsafeString(bb.B))\n\t\t}\n\t\tbbPool.Put(bb)\n\tdefault:\n\t\tlogger.Panicf(\"BUG: unknown valueType=%d\", c.valueType)\n\t}\n\n\tc.values = valuesBuf[valuesBufLen:]\n\tbr.buf = buf\n\tbr.valuesBuf = valuesBuf\n\n\treturn c.values\n}",
"func GetSQLFieldsAndValues(m interface{}) (string, string) {\n\n\tmp := GetMapValues(m)\n\tresultV := \"\"\n\toldResultV := \"\"\n\tresultK := \"\"\n\toldResultK := \"\"\n\tfor key, value := range *mp {\n\t\tresultV += value\n\t\tresultK += \"`\" + key + \"`\"\n\t\toldResultV = resultV\n\t\toldResultK = resultK\n\t\tresultV += \", \"\n\t\tresultK += \", \"\n\t}\n\treturn oldResultK, oldResultV\n}",
"func ColumnNames(v interface{}) []string {\n\tfields := reflect.TypeOf(v)\n\tvalues := reflect.ValueOf(v)\n\tif values.Kind() == reflect.Ptr {\n\t\tvalues = values.Elem()\n\t\tfields = fields.Elem()\n\t}\n\tcols := make([]string, values.NumField())\n\tfor i := 0; i < values.NumField(); i++ {\n\t\tf := fields.Field(i)\n\t\tcols[i] = f.Tag.Get(\"db\")\n\t}\n\treturn cols\n}",
"func getPgColumns(ctx context.Context, log lg.Log, db sqlz.DB, tblName string) ([]*pgColumn, error) {\n\t// colsQuery gets column information from information_schema.columns.\n\t//\n\t// It also has a subquery to get column comments. See:\n\t// - https://stackoverflow.com/a/22547588\n\t// - https://dba.stackexchange.com/a/160668\n\tconst colsQuery = `SELECT table_catalog,\n table_schema,\n table_name,\n column_name, \n ordinal_position, \n column_default,\n is_nullable,\n data_type,\n character_maximum_length,\n character_octet_length,\n numeric_precision,\n numeric_precision_radix,\n numeric_scale,\n datetime_precision,\n domain_catalog,\n domain_schema,\n domain_name,\n udt_catalog,\n udt_schema,\n udt_name,\n is_identity,\n is_generated,\n is_updatable,\n (\n\tSELECT\n\t\tpg_catalog.col_description(c.oid, cols.ordinal_position::int)\n\tFROM\n\t\tpg_catalog.pg_class c\n\tWHERE\n\t\tc.oid = (SELECT ('\"' || cols.table_name || '\"')::regclass::oid)\n\t\tAND c.relname = cols.table_name\n\t) AS column_comment\nFROM information_schema.columns cols\nWHERE cols.table_catalog = current_catalog AND cols.table_schema = current_schema() AND cols.table_name = $1\nORDER BY cols.table_catalog, cols.table_schema, cols.table_name, cols.ordinal_position`\n\n\trows, err := db.QueryContext(ctx, colsQuery, tblName)\n\tif err != nil {\n\t\treturn nil, errz.Err(err)\n\t}\n\n\tdefer log.WarnIfCloseError(rows)\n\n\tvar cols []*pgColumn\n\tfor rows.Next() {\n\t\tcol := &pgColumn{}\n\t\terr = scanPgColumn(rows, col)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcols = append(cols, col)\n\t}\n\terr = closeRows(rows)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cols, nil\n}",
"func (*Sample) scanValues(columns []string) ([]interface{}, error) {\n\tvalues := make([]interface{}, len(columns))\n\tfor i := range columns {\n\t\tswitch columns[i] {\n\t\tcase sample.FieldURL:\n\t\t\tvalues[i] = new([]byte)\n\t\tcase sample.FieldActive:\n\t\t\tvalues[i] = new(sql.NullBool)\n\t\tcase sample.FieldAmount:\n\t\t\tvalues[i] = new(sql.NullFloat64)\n\t\tcase sample.FieldOfficeID:\n\t\t\tvalues[i] = new(sql.NullInt64)\n\t\tcase sample.FieldID, sample.FieldCode, sample.FieldSize, sample.FieldMemo:\n\t\t\tvalues[i] = new(sql.NullString)\n\t\tcase sample.FieldCreatedAt, sample.FieldUpdatedAt:\n\t\t\tvalues[i] = new(sql.NullTime)\n\t\tcase sample.ForeignKeys[0]: // office_id\n\t\t\tvalues[i] = new(sql.NullInt64)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected column %q for type Sample\", columns[i])\n\t\t}\n\t}\n\treturn values, nil\n}",
"func ToRowFromValue(objectValue reflect.Value, columns []interface{}, valueSerializer func(object interface{}) (string, error)) ([]string, error) {\n\tfor reflect.TypeOf(objectValue.Interface()).Kind() == reflect.Ptr {\n\t\tobjectValue = objectValue.Elem()\n\t}\n\tobjectValue = reflect.ValueOf(objectValue.Interface()) // sets value to concerete type\n\trow := make([]string, len(columns))\n\tswitch objectValue.Type().Kind() {\n\tcase reflect.Map:\n\t\tfor j, key := range columns {\n\t\t\tif v := objectValue.MapIndex(reflect.ValueOf(key)); v.IsValid() && (v.Type().Kind() == reflect.String || !v.IsNil()) {\n\t\t\t\tstr, err := valueSerializer(v.Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn row, errors.Wrap(err, \"error serializing value\")\n\t\t\t\t}\n\t\t\t\trow[j] = str\n\t\t\t} else {\n\t\t\t\tstr, err := valueSerializer(nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn row, errors.Wrap(err, \"error serializing value\")\n\t\t\t\t}\n\t\t\t\trow[j] = str\n\t\t\t}\n\t\t}\n\tcase reflect.Struct:\n\t\tfor j, column := range columns {\n\t\t\tcolumnLowerCase := strings.ToLower(fmt.Sprint(column))\n\t\t\tif f := objectValue.FieldByNameFunc(func(match string) bool { return strings.ToLower(match) == columnLowerCase }); f.IsValid() && (f.Type().Kind() == reflect.String || !f.IsNil()) {\n\t\t\t\tstr, err := valueSerializer(f.Interface())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn row, errors.Wrap(err, \"error serializing value\")\n\t\t\t\t}\n\t\t\t\trow[j] = str\n\t\t\t} else {\n\t\t\t\tstr, err := valueSerializer(nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn row, errors.Wrap(err, \"error serializing value\")\n\t\t\t\t}\n\t\t\t\trow[j] = str\n\t\t\t}\n\t\t}\n\t}\n\n\treturn row, nil\n}",
"func GetRowValuesStringForColumnsList(offset int, columns []string) (rowStr string) {\n\tfor i := range columns {\n\t\tif i > 0 {\n\t\t\trowStr += \", \"\n\t\t}\n\t\trowStr += fmt.Sprintf(\"$%d\", offset+i+1)\n\t}\n\treturn\n}",
"func GetColumns(db *sql.DB, table *Table, blackList map[string]bool) {\n\t// retrieve columns\n\tcolDefRows, err := db.Query(\n\t\t`SELECT\n\t\t\tcolumn_name, data_type, column_type, is_nullable, column_default, extra, column_comment \n\t\tFROM\n\t\t\tinformation_schema.columns\n\t\tWHERE\n\t\t\ttable_schema = database() AND table_name = ?`,\n\t\ttable.Name)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not query the database: %s\", err)\n\t}\n\tdefer colDefRows.Close()\n\n\tfor colDefRows.Next() {\n\t\t// datatype as bytes so that SQL <null> values can be retrieved\n\t\tvar colNameBytes, dataTypeBytes, columnTypeBytes, isNullableBytes, columnDefaultBytes, extraBytes, columnCommentBytes []byte\n\t\tif err := colDefRows.Scan(&colNameBytes, &dataTypeBytes, &columnTypeBytes, &isNullableBytes, &columnDefaultBytes, &extraBytes, &columnCommentBytes); err != nil {\n\t\t\tlog.Fatalf(\"Could not query INFORMATION_SCHEMA for column information error: %s\", err)\n\t\t}\n\t\tcolName, columnComment :=\n\t\t\tstring(colNameBytes), string(columnCommentBytes)\n\n\t\t// create a column\n\t\tcol := new(Column)\n\t\tcol.Name = utils.CamelCase(colName)\n\t\tcol.Type = \"string\"\n\n\t\t// Tag info\n\t\ttag := new(OrmTag)\n\t\tif table.Pk == colName || colName == \"id\" {\n\t\t\tcol.Type = \"string\"\n\t\t\ttag.Pk = true\n\t\t}\n\t\ttag.Column = colName\n\t\ttag.Comment = columnComment\n\t\tcol.Tag = tag\n\n\t\ttable.Columns = append(table.Columns, col)\n\t}\n}",
"func exportStruct(s interface{}) ([]string, []interface{}, error) {\n\tstructType := reflect.TypeOf(s)\n\tstructVal := reflect.ValueOf(s)\n\tif structType.Kind() == reflect.Ptr {\n\t\tstructType = structType.Elem()\n\t\tstructVal = structVal.Elem()\n\t}\n\tif structType.Kind() != reflect.Struct {\n\t\treturn nil, nil, fmt.Errorf(\"exportStruct: type %v must be a struct or pointer to a struct\", structType)\n\t}\n\n\tcols := make([]string, 0, structType.NumField())\n\tvals := make([]interface{}, 0, structType.NumField())\n\tfor i := 0; i < structType.NumField(); i++ {\n\t\tfieldInfo := structType.Field(i)\n\t\ttagInfo := fieldInfo.Tag.Get(\"spanner\")\n\t\tif fieldInfo.PkgPath != \"\" || tagInfo == \"-\" { // field is unexported or ignored\n\t\t\tcontinue\n\t\t}\n\t\tif tagInfo == \"\" {\n\t\t\ttagInfo = fieldInfo.Name\n\t\t}\n\t\tcols = append(cols, tagInfo)\n\t\tswitch f := structVal.Field(i).Interface().(type) {\n\t\tcase *string:\n\t\t\tns := spanner.NullString{}\n\t\t\tif ns.Valid = f != nil; ns.Valid {\n\t\t\t\tns.StringVal = *f\n\t\t\t}\n\t\t\tvals = append(vals, ns)\n\t\tcase *int64:\n\t\t\tni := spanner.NullInt64{}\n\t\t\tif ni.Valid = f != nil; ni.Valid {\n\t\t\t\tni.Int64 = *f\n\t\t\t}\n\t\t\tvals = append(vals, ni)\n\t\tcase *bool:\n\t\t\tnb := spanner.NullBool{}\n\t\t\tif nb.Valid = f != nil; nb.Valid {\n\t\t\t\tnb.Bool = *f\n\t\t\t}\n\t\t\tvals = append(vals, nb)\n\t\tcase *float64:\n\t\t\tnf := spanner.NullFloat64{}\n\t\t\tif nf.Valid = f != nil; nf.Valid {\n\t\t\t\tnf.Float64 = *f\n\t\t\t}\n\t\t\tvals = append(vals, nf)\n\t\tcase *time.Time:\n\t\t\tnt := spanner.NullTime{}\n\t\t\tif nt.Valid = f != nil; nt.Valid {\n\t\t\t\tnt.Time = *f\n\t\t\t}\n\t\t\tvals = append(vals, nt)\n\t\tdefault:\n\t\t\t// Use the default behavior for non-nullable or non-primitive columns.\n\t\t\tvals = append(vals, structVal.Field(i).Interface())\n\t\t}\n\t}\n\treturn cols, vals, nil\n}",
"func (*Patient) scanValues(columns []string) ([]interface{}, error) {\n\tvalues := make([]interface{}, len(columns))\n\tfor i := range columns {\n\t\tswitch columns[i] {\n\t\tcase patient.FieldID:\n\t\t\tvalues[i] = &sql.NullInt64{}\n\t\tcase patient.FieldName, patient.FieldBirthday:\n\t\t\tvalues[i] = &sql.NullString{}\n\t\tcase patient.ForeignKeys[0]: // _Gender\n\t\t\tvalues[i] = &sql.NullInt64{}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected column %q for type Patient\", columns[i])\n\t\t}\n\t}\n\treturn values, nil\n}",
"func (e *Event) assignValues(columns []string, values []interface{}) error {\n\tif m, n := len(values), len(columns); m < n {\n\t\treturn fmt.Errorf(\"mismatch number of scan values: %d != %d\", m, n)\n\t}\n\tfor i := range columns {\n\t\tswitch columns[i] {\n\t\tcase event.FieldID:\n\t\t\tvalue, ok := values[i].(*sql.NullInt64)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field id\", value)\n\t\t\t}\n\t\t\te.ID = int(value.Int64)\n\t\tcase event.FieldCreatedAt:\n\t\t\tif value, ok := values[i].(*sql.NullTime); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field created_at\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.CreatedAt = value.Time\n\t\t\t}\n\t\tcase event.FieldUpdatedAt:\n\t\t\tif value, ok := values[i].(*sql.NullTime); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field updated_at\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.UpdatedAt = value.Time\n\t\t\t}\n\t\tcase event.FieldDeletedAt:\n\t\t\tif value, ok := values[i].(*sql.NullTime); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field deleted_at\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.DeletedAt = value.Time\n\t\t\t}\n\t\tcase event.FieldTs:\n\t\t\tif value, ok := values[i].(*sql.NullTime); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field ts\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.Ts = value.Time\n\t\t\t}\n\t\tcase event.FieldValue:\n\t\t\tif value, ok := values[i].(*sql.NullFloat64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field value\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.Value = value.Float64\n\t\t\t}\n\t\tcase event.FieldAcked:\n\t\t\tif value, ok := values[i].(*sql.NullBool); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field acked\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.Acked = value.Bool\n\t\t\t}\n\t\tcase event.FieldAckedTs:\n\t\t\tif value, ok := values[i].(*sql.NullTime); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field ackedTs\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.AckedTs = value.Time\n\t\t\t}\n\t\tcase event.FieldEndpointId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field endpointId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.EndpointId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldTypeId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field typeId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.TypeId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldNameId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field nameId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.NameId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldLabelId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field labelId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.LabelId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldClusterId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field clusterId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.ClusterId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldAgentId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field agentId\", values[i])\n\t\t\t} else if 
value.Valid {\n\t\t\t\te.AgentId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldNodeId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field nodeId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.NodeId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldProcesId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field procesId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.ProcesId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldContainerId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field containerId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.ContainerId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.FieldPodId:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for field podId\", values[i])\n\t\t\t} else if value.Valid {\n\t\t\t\te.PodId = uint(value.Int64)\n\t\t\t}\n\t\tcase event.ForeignKeys[0]:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for edge-field metric_endpoint_events\", value)\n\t\t\t} else if value.Valid {\n\t\t\t\te.metric_endpoint_events = new(uint)\n\t\t\t\t*e.metric_endpoint_events = uint(value.Int64)\n\t\t\t}\n\t\tcase event.ForeignKeys[1]:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for edge-field metric_label_events\", value)\n\t\t\t} else if value.Valid {\n\t\t\t\te.metric_label_events = new(uint)\n\t\t\t\t*e.metric_label_events = uint(value.Int64)\n\t\t\t}\n\t\tcase event.ForeignKeys[2]:\n\t\t\tif value, ok := values[i].(*sql.NullInt64); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected type %T for edge-field metric_name_events\", value)\n\t\t\t} else if value.Valid {\n\t\t\t\te.metric_name_events = new(uint)\n\t\t\t\t*e.metric_name_events = uint(value.Int64)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func StructToFieldValues(rec interface{}, tag string) ([]string, []interface{}, error) {\n\tvar tableFields []string\n\tvar fieldValues []interface{}\n\tmapDataValue, err := StructToMap(rec)\n\tif err != nil {\n\t\treturn nil, nil, errors.New(\"error computing struct to map\")\n\t}\n\t// compose tagMapDataValue\n\tfor key, val := range mapDataValue {\n\t\ttagField, tagErr := TagField(rec.(struct{}), key, tag)\n\t\tif tagErr != nil {\n\t\t\treturn nil, nil, errors.New(fmt.Sprintf(\"error retrieving tag-field: %v\", key))\n\t\t}\n\t\ttableFields = append(tableFields, tagField)\n\t\tfieldValues = append(fieldValues, val)\n\t}\n\treturn tableFields, fieldValues, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
SQLReadEPrint takes a configuration, repository ID, base URL and EPrint ID, generates a series of SELECT statements populating a new EPrint struct, and returns that struct or an error (e.g. "not found" if the EPrint ID is not in the repository) | func SQLReadEPrint(config *Config, repoID string, baseURL string, eprintID int) (*EPrint, error) {
var (
tables map[string][]string
columns []string
)
if eprintID == 0 {
return nil, fmt.Errorf("not found, %d not in %q", eprintID, repoID)
}
_, ok := config.Repositories[repoID]
if !ok {
return nil, fmt.Errorf("not found, %q not defined", repoID)
}
tables = config.Repositories[repoID].TableMap
columns, ok = tables["eprint"]
if !ok {
return nil, fmt.Errorf("not found, %q eprint table not defined", repoID)
}
db, ok := config.Connections[repoID]
if !ok {
return nil, fmt.Errorf("no database connection for %s", repoID)
}
	// NOTE: since the specific subset of columns in a repository
	// is known only at run time we need to set up a generic pointer
	// array for the scan results based on our newly allocated
	// EPrint struct.
eprint := new(EPrint) // Generate an empty EPrint struct
eprint.EPrintID = eprintID
	// NOTE: The data is littered with NULLs in EPrints. We need to
	// generate both a map of values into the EPrint structure and
	// aggregate the SQL column definitions to deal with the NULL
	// values.
columnSQL, values := eprintToColumnsAndValues(eprint, columns, true)
	// NOTE: With the "values" pointer array set up, the query can be built
	// and executed in the usual SQL fashion.
stmt := fmt.Sprintf(`SELECT %s FROM eprint WHERE eprintid = ? LIMIT 1`, strings.Join(columnSQL, `, `))
rows, err := db.Query(stmt, eprintID)
if err != nil {
return nil, fmt.Errorf(`ERROR: query error (%q, %q), %s`, repoID, stmt, err)
}
cnt := 0
for rows.Next() {
		// NOTE: Because the values array holds the addresses into our
		// EPrint struct, "Scan" does the actual mapping.
		// This makes it sort of "auto-magical".
if err := rows.Scan(values...); err != nil {
log.Printf(`%s.eprint eprintid = %d, %s`, repoID, eprintID, err)
}
cnt++
}
rows.Close()
	// NOTE: zero rows returned is treated as "not found" (see the else branch below).
if cnt > 0 {
// Normalize fields inferred from MySQL database tables.
eprint.ID = fmt.Sprintf(`%s/id/eprint/%d`, baseURL, eprint.EPrintID)
eprint.LastModified = makeTimestamp(eprint.LastModifiedYear, eprint.LastModifiedMonth, eprint.LastModifiedDay, eprint.LastModifiedHour, eprint.LastModifiedMinute, eprint.LastModifiedSecond)
// NOTE: EPrint XML uses a datestamp for output but tracks a timestamp.
eprint.Datestamp = makeTimestamp(eprint.DatestampYear, eprint.DatestampMonth, eprint.DatestampDay, eprint.DatestampHour, eprint.DatestampMinute, eprint.DatestampSecond)
eprint.StatusChanged = makeTimestamp(eprint.StatusChangedYear, eprint.StatusChangedMonth, eprint.StatusChangedDay, eprint.StatusChangedHour, eprint.StatusChangedMinute, eprint.StatusChangedSecond)
eprint.Date = makeApproxDate(eprint.DateYear, eprint.DateMonth, eprint.DateDay)
		// FIXME: Add depositor info (eprint.userid -> user* tables):
		// deposited on, deposited by
if eprint.UserID > 0 {
eprint.DepositedBy = userIDToName(repoID, eprint.UserID, db)
eprint.DepositedOn = makeTimestamp(eprint.DatestampYear, eprint.DatestampMonth, eprint.DatestampDay, eprint.DatestampHour, eprint.DatestampMinute, eprint.DatestampSecond)
}
// Used in CaltechTHESIS
eprint.ThesisSubmittedDate = makeDatestamp(eprint.ThesisSubmittedDateYear, eprint.ThesisSubmittedDateMonth, eprint.ThesisSubmittedDateDay)
eprint.ThesisDefenseDate = makeDatestamp(eprint.ThesisDefenseDateYear, eprint.ThesisDefenseDateMonth, eprint.ThesisDefenseDateDay)
eprint.ThesisApprovedDate = makeDatestamp(eprint.ThesisApprovedDateYear, eprint.ThesisApprovedDateMonth, eprint.ThesisApprovedDateDay)
eprint.ThesisPublicDate = makeDatestamp(eprint.ThesisPublicDateYear, eprint.ThesisPublicDateMonth, eprint.ThesisPublicDateDay)
eprint.ThesisDegreeDate = makeDatestamp(eprint.ThesisDegreeDateYear, eprint.ThesisDegreeDateMonth, eprint.ThesisDegreeDateDay)
eprint.GradOfficeApprovalDate = makeDatestamp(eprint.GradOfficeApprovalDateYear, eprint.GradOfficeApprovalDateMonth, eprint.GradOfficeApprovalDateDay)
// CreatorsItemList
eprint.Creators = eprintIDToCreators(repoID, eprintID, db, tables)
// EditorsItemList
eprint.Editors = eprintIDToEditors(repoID, eprintID, db, tables)
// ContributorsItemList
eprint.Contributors = eprintIDToContributors(repoID, eprintID, db, tables)
// CorpCreators
eprint.CorpCreators = eprintIDToCorpCreators(repoID, eprintID, db, tables)
// CorpContributors
eprint.CorpContributors = eprintIDToCorpContributors(repoID, eprintID, db, tables)
// LocalGroupItemList (SimpleItemList)
eprint.LocalGroup = eprintIDToLocalGroup(repoID, eprintID, db, tables)
// FundersItemList (custom)
eprint.Funders = eprintIDToFunders(repoID, eprintID, db, tables)
// Documents (*DocumentList)
eprint.Documents = eprintIDToDocumentList(repoID, baseURL, eprintID, db, tables)
// RelatedURLs List
eprint.RelatedURL = eprintIDToRelatedURL(repoID, baseURL, eprintID, db, tables)
// ReferenceText (item list)
eprint.ReferenceText = eprintIDToReferenceText(repoID, eprintID, db, tables)
// Projects
eprint.Projects = eprintIDToProjects(repoID, eprintID, db, tables)
// OtherNumberingSystem (item list)
eprint.OtherNumberingSystem = eprintIDToOtherNumberingSystem(repoID, eprintID, db, tables)
// Subjects List
eprint.Subjects = eprintIDToSubjects(repoID, eprintID, db, tables)
// ItemIssues
eprint.ItemIssues = eprintIDToItemIssues(repoID, eprintID, db, tables)
// Exhibitors
eprint.Exhibitors = eprintIDToExhibitors(repoID, eprintID, db, tables)
// Producers
eprint.Producers = eprintIDToProducers(repoID, eprintID, db, tables)
// Conductors
eprint.Conductors = eprintIDToConductors(repoID, eprintID, db, tables)
// Lyricists
eprint.Lyricists = eprintIDToLyricists(repoID, eprintID, db, tables)
// Accompaniment
eprint.Accompaniment = eprintIDToAccompaniment(repoID, eprintID, db, tables)
// SkillAreas
eprint.SkillAreas = eprintIDToSkillAreas(repoID, eprintID, db, tables)
// CopyrightHolders
eprint.CopyrightHolders = eprintIDToCopyrightHolders(repoID, eprintID, db, tables)
// Reference
eprint.Reference = eprintIDToReference(repoID, eprintID, db, tables)
// ConfCreators
eprint.ConfCreators = eprintIDToConfCreators(repoID, eprintID, db, tables)
// AltTitle
eprint.AltTitle = eprintIDToAltTitle(repoID, eprintID, db, tables)
// PatentAssignee
eprint.PatentAssignee = eprintIDToPatentAssignee(repoID, eprintID, db, tables)
// RelatedPatents
eprint.RelatedPatents = eprintIDToRelatedPatents(repoID, eprintID, db, tables)
// Divisions
eprint.Divisions = eprintIDToDivisions(repoID, eprintID, db, tables)
// ThesisAdvisor
eprint.ThesisAdvisor = eprintIDToThesisAdvisors(repoID, eprintID, db, tables)
// ThesisCommittee
eprint.ThesisCommittee = eprintIDToThesisCommittee(repoID, eprintID, db, tables)
// OptionMajor
eprint.OptionMajor = eprintIDToOptionMajor(repoID, eprintID, db, tables)
// OptionMinor
eprint.OptionMinor = eprintIDToOptionMinor(repoID, eprintID, db, tables)
		/*************************************************************
		 NOTE: These are notes about possible original implementation
		 errors or elements that did not survive the upgrade to
		 EPrints 3.3.16
		 eprint.LearningLevels (not an item list in EPrints), using LearningLevelText
		 GScholar: skipping, not an item list; a 2010 plugin for EPrints 3.2.
		 eprint.GScholar = eprintIDToGScholar(repoID, eprintID, db, tables)
		 Shelves: a plugin, not replicating, not an item list
		 eprint.Shelves = eprintIDToSchelves(repoID, eprintID, db, tables)
		 eprint.PatentClassification is not an item list, using eprint.PatentClassificationText
		 eprint.OtherURL appears to be extraneous
		 eprint.CorpContributors appears to be extraneous
		*************************************************************/
} else {
return nil, fmt.Errorf("not found")
}
return eprint, nil
} | [
"func ImportEPrints(config *Config, repoID string, ds *DataSource, eprints *EPrints) ([]int, error) {\n\tvar importErrors error\n\tids := []int{}\n\n\tif config.Connections == nil {\n\t\treturn nil, fmt.Errorf(`no databases are not configured`)\n\t}\n\t_, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(`%s database connection not configured`, repoID)\n\t}\n\n\t// Check to make sure updates are allowed if non-Zero\n\t// eprint ids present.\n\tfor _, eprint := range eprints.EPrint {\n\t\tif eprint.EPrintID != 0 {\n\t\t\treturn nil, fmt.Errorf(\"create failed eprint id %d in %s\", eprint.EPrintID, repoID)\n\t\t}\n\t\tif eprint.Collection == \"\" && ds.DefaultCollection != \"\" {\n\t\t\teprint.Collection = DefaultCollection\n\t\t}\n\t\tif eprint.IDNumber == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.IDNumber = GenerateIDNumber(eprint)\n\t\t}\n\t\tif eprint.OfficialURL == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.OfficialURL = GenerateOfficialURL(eprint)\n\t\t}\n\t\tif eprint.Rights == \"\" && ds.DefaultRights != \"\" {\n\t\t\teprint.Rights = ds.DefaultRights\n\t\t}\n\t\tif eprint.Refereed == \"\" && eprint.Type == \"article\" &&\n\t\t\tds.DefaultRefereed != \"\" {\n\t\t\teprint.Refereed = ds.DefaultRefereed\n\t\t}\n\t\tif eprint.EPrintStatus == \"\" && ds.DefaultStatus != \"\" {\n\t\t\teprint.EPrintStatus = ds.DefaultStatus\n\t\t}\n\t\tif eprint.Abstract != \"\" && ds.StripTags {\n\t\t\tif cleaner.HasEncodedElements([]byte(eprint.Abstract)) {\n\t\t\t\teprint.Abstract = string(cleaner.StripTags([]byte(eprint.Abstract)))\n\t\t\t}\n\t\t}\n\t}\n\tfor _, eprint := range eprints.EPrint {\n\t\tid, err := SQLCreateEPrint(config, repoID, ds, eprint)\n\t\tif err != nil {\n\t\t\tif importErrors == nil {\n\t\t\t\timportErrors = err\n\t\t\t} else {\n\t\t\t\timportErrors = fmt.Errorf(\"%s; %s\", importErrors, err)\n\t\t\t}\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\treturn ids, importErrors\n}",
"func SQLCreateEPrint(config *Config, repoID string, ds *DataSource, eprint *EPrint) (int, error) {\n\tvar (\n\t\terr error\n\t)\n\tdb, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn 0, fmt.Errorf(`no database connection for %s`, repoID)\n\t}\n\t// If eprint id is zero generate a sequence of INSERT statements\n\t// for the record. Others use generate the appropriate History\n\t// records and then delete insert the new record.\n\ttableName := `eprint`\n\n\tif columns, ok := ds.TableMap[tableName]; ok {\n\t\t// Generate an empty row and capture the id created.\n\t\tstmt := `INSERT INTO eprint (eprintid) (SELECT (IFNULL((SELECT eprintid FROM eprint ORDER BY eprintid DESC LIMIT 1), 0) + 1) AS eprintid)`\n\t\t_, err := db.Exec(stmt)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t\tstmt = `SELECT eprintid FROM eprint ORDER BY eprintid DESC LIMIT 1`\n\t\trows, err := db.Query(stmt)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t\tid := 0\n\t\tfor rows.Next() {\n\t\t\tif err := rows.Scan(&id); err != nil {\n\t\t\t\treturn 0, fmt.Errorf(`could not calculate the new eprintid value, %s`, err)\n\t\t\t}\n\t\t}\n\t\trows.Close()\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL failed to get insert id, %s`, err)\n\t\t}\n\t\teprint.EPrintID = int(id)\n\t\teprint.Dir = makeDirValue(eprint.EPrintID)\n\t\t// FIXME: decide if the is automatic or if this should be\n\t\t// passed in with the data structure.\n\t\t// Generate minimal date and time stamps\n\t\tnow := time.Now()\n\t\tif eprint.Datestamp == \"\" {\n\t\t\teprint.Datestamp = now.Format(timestamp)\n\t\t\teprint.DatestampYear = now.Year()\n\t\t\teprint.DatestampMonth = int(now.Month())\n\t\t\teprint.DatestampDay = now.Day()\n\t\t\teprint.DatestampHour = now.Hour()\n\t\t\teprint.DatestampMinute = now.Minute()\n\t\t\teprint.DatestampSecond = now.Second()\n\t\t} else if dt, err := time.Parse(datestamp, eprint.Datestamp); err == nil {\n\t\t\teprint.DatestampYear = dt.Year()\n\t\t\teprint.DatestampMonth = int(dt.Month())\n\t\t\teprint.DatestampDay = dt.Day()\n\t\t} else if dt, err := time.Parse(timestamp, eprint.Datestamp); err == nil {\n\t\t\teprint.DatestampYear = dt.Year()\n\t\t\teprint.DatestampMonth = int(dt.Month())\n\t\t\teprint.DatestampDay = dt.Day()\n\t\t\teprint.DatestampHour = dt.Hour()\n\t\t\teprint.DatestampMinute = dt.Minute()\n\t\t\teprint.DatestampSecond = dt.Second()\n\t\t}\n\n\t\teprint.LastModified = now.Format(timestamp)\n\t\teprint.LastModifiedYear = now.Year()\n\t\teprint.LastModifiedMonth = int(now.Month())\n\t\teprint.LastModifiedDay = now.Day()\n\t\teprint.LastModifiedHour = now.Hour()\n\t\teprint.LastModifiedMinute = now.Minute()\n\t\teprint.LastModifiedSecond = now.Second()\n\n\t\teprint.StatusChanged = now.Format(timestamp)\n\t\teprint.StatusChangedYear = now.Year()\n\t\teprint.StatusChangedMonth = int(now.Month())\n\t\teprint.StatusChangedDay = now.Day()\n\t\teprint.StatusChangedHour = now.Hour()\n\t\teprint.StatusChangedMinute = now.Minute()\n\t\teprint.StatusChangedSecond = now.Second()\n\n\t\tif eprint.Date != \"\" {\n\t\t\teprint.DateYear, eprint.DateMonth, eprint.DateDay = approxYMD(eprint.Date)\n\t\t}\n\t\tif eprint.ThesisSubmittedDate != \"\" {\n\t\t\teprint.ThesisSubmittedDateYear, eprint.ThesisSubmittedDateMonth, eprint.ThesisSubmittedDateDay = approxYMD(eprint.ThesisSubmittedDate)\n\t\t}\n\t\tif eprint.ThesisDefenseDate != \"\" {\n\t\t\teprint.ThesisDefenseDateYear, eprint.ThesisDefenseDateMonth, 
eprint.ThesisDefenseDateDay = approxYMD(eprint.ThesisDefenseDate)\n\t\t}\n\t\tif eprint.ThesisApprovedDate != \"\" {\n\t\t\teprint.ThesisApprovedDateYear, eprint.ThesisApprovedDateMonth, eprint.ThesisApprovedDateDay = approxYMD(eprint.ThesisApprovedDate)\n\t\t}\n\t\tif eprint.ThesisPublicDate != \"\" {\n\t\t\teprint.ThesisPublicDateYear, eprint.ThesisPublicDateMonth, eprint.ThesisPublicDateDay = approxYMD(eprint.ThesisPublicDate)\n\t\t}\n\t\tif eprint.GradOfficeApprovalDate != \"\" {\n\t\t\teprint.GradOfficeApprovalDateYear, eprint.GradOfficeApprovalDateMonth, eprint.GradOfficeApprovalDateDay = approxYMD(eprint.GradOfficeApprovalDate)\n\t\t}\n\n\t\t// Step two, write the rest of the date into the main table.\n\t\tcolumnsSQL, values := eprintToColumnsAndValues(eprint, columns, false)\n\t\tstmt = fmt.Sprintf(`REPLACE INTO %s (%s) VALUES (%s)`,\n\t\t\ttableName,\n\t\t\tstrings.Join(columnsSQL, `, `),\n\t\t\tstrings.Join(qmList(len(columnsSQL)), `, `))\n\t\t_, err = db.Exec(stmt, values...)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t}\n\tif eprint.EPrintID != 0 {\n\t\tfor tableName, columns := range ds.TableMap {\n\t\t\t// Handle the remaining tables, i.e. skip eprint table.\n\t\t\tswitch {\n\t\t\tcase tableName == `eprint`:\n\t\t\t\t// Skip eprint table, we've already processed it\n\t\t\tcase tableName == `eprint_keyword`:\n\t\t\t\t// Skip eprint_keyword, our EPrints use keywords (longtext) in eprint table.\n\t\t\tcase strings.HasPrefix(tableName, `document`):\n\t\t\t\t//log.Printf(`FIXME %s columns: %s`, tableName, strings.Join(columns, `, `))\n\t\t\tcase strings.HasPrefix(tableName, `file`):\n\t\t\t\t//log.Printf(`FIXME %s columns: %s`, tableName, strings.Join(columns, `, `))\n\t\t\tdefault:\n\t\t\t\t// Insert new rows in associated table\n\t\t\t\tif err := insertItemList(db, repoID, tableName, columns, eprint); err != nil {\n\t\t\t\t\treturn eprint.EPrintID, fmt.Errorf(`failed to insert eprintid %d in table %s for %s, %s`, eprint.EPrintID, tableName, repoID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn eprint.EPrintID, nil\n\t}\n\treturn 0, err\n}",
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func ReadEmployees() (int, error) {\n ctx := context.Background()\n\n // Check if database is alive.\n err := db.PingContext(ctx)\n if err != nil {\n return -1, err\n }\n\n tsql := fmt.Sprintf(\"select @@version;\")\n\n // Execute query\n rows, err := db.QueryContext(ctx, tsql)\n if err != nil {\n return -1, err\n }\n\n defer rows.Close()\n\n var count int\n\n // Iterate through the result set.\n for rows.Next() {\n var name, location string\n var id int\n\n // Get values from row.\n err := rows.Scan(&id, &name, &location)\n if err != nil {\n return -1, err\n }\n\n fmt.Printf(\"ID: %d, Name: %s, Location: %s\\n\", id, name, location)\n count++\n }\n\n return count, nil\n}",
"func Read(s beam.Scope, driver, dsn, table string, t reflect.Type) beam.PCollection {\n\ts = s.Scope(driver + \".Read\")\n\treturn query(s, driver, dsn, fmt.Sprintf(\"SELECT * from %v\", table), t)\n}",
"func DbwRead(entity, id string) (*map[string]interface{}, error) {\n\treturn (dbInt).Read(entity, id)\n}",
"func (*Concrete) ReadByMap(out interface{}, cond map[string]interface{}) (err error) {\n\to := database.Connection()\n\terr = o.Find(out, cond).Error\n\treturn\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func (r *EventRepository) Read(ID int64) (*model.Event, error) {\n\tif ID <= 0 {\n\t\treturn nil,\n\t\t\trepository.NewError(\n\t\t\t\trepository.ErrorInvalidArgument,\n\t\t\t\t\"first parameter must be greater than zero\",\n\t\t\t)\n\t}\n\n\tif err := r.Connect(); err != nil {\n\t\treturn nil, err\n\t}\n\n\trow := r.conn.QueryRow(\n\t\tr.ctx,\n\t\t`SELECT id, title, description, location, start_time, end_time, notify_before, user_id, calendar_id FROM event WHERE id = $1`,\n\t\tID,\n\t)\n\n\tm := &model.Event{}\n\terr := row.Scan(&m.ID, &m.Title, &m.Description, &m.Location, &m.StartTime, &m.EndTime, &m.NotifyBefore, &m.UserID, &m.CalendarID)\n\tif err != nil {\n\t\tif err == pgx.ErrNoRows {\n\t\t\treturn nil,\n\t\t\t\trepository.NewError(\n\t\t\t\t\trepository.ErrorNotFound,\n\t\t\t\t\tfmt.Sprintf(\"failed to find record with ID: %d\", ID),\n\t\t\t\t)\n\t\t}\n\t\treturn nil,\n\t\t\trepository.WrapErrorf(\n\t\t\t\trepository.ErrorDataRetrievalFailure,\n\t\t\t\terr,\n\t\t\t\t\"failed to read record with ID: %d\",\n\t\t\t\tID,\n\t\t\t)\n\t}\n\n\tm.StartTime = m.StartTime.UTC()\n\tm.EndTime = m.EndTime.UTC()\n\n\treturn m, nil\n}",
"func (pg *dataStorage) Retrieve(\n\tgtx context.Context,\n\tdtype string,\n\tsortFiled string,\n\toffset int64,\n\tlimit int64,\n\tfilter *teak.Filter,\n\tout interface{}) (err error) {\n\tselector := generateSelector(filter)\n\tvar buf strings.Builder\n\tbuf.Grow(100)\n\tbuf.WriteString(\"SELECT * FROM \")\n\tbuf.WriteString(dtype)\n\tbuf.WriteString(selector)\n\tbuf.WriteString(\" OFFSET \")\n\tbuf.WriteString(strconv.FormatInt(offset, 10))\n\tbuf.WriteString(\" LIMIT \")\n\tbuf.WriteString(strconv.FormatInt(limit, 10))\n\tbuf.WriteString(\" ORDER BY \")\n\tbuf.WriteString(sortFiled) //Check for minus sign?? like in mongo??\n\terr = defDB.SelectContext(gtx, out, buf.String())\n\treturn teak.LogError(\"t.pg.store\", err)\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func (db *Database) ReadEntries(query map[string]interface{}) (map[string]interface{}, error) {\n\toutput := make(map[string]interface{})\n\tif query[\"table\"] == nil {\n\t\treturn output, errors.New(\"'table' need to be set for now\")\n\t}\n\tif query[\"limit\"] == nil {\n\t\tquery[\"limit\"] = fmt.Sprintf(\"%d\", MaxValues)\n\t}\n\tif query[\"order_by\"] == nil {\n\t\tquery[\"order_by\"] = \"created\"\n\t}\n\tif query[\"order\"] == nil {\n\t\tquery[\"order\"] = \"desc\"\n\t}\n\tif query[\"content\"] == nil {\n\t\tquery[\"content\"] = make(map[string]interface{})\n\t}\n\treturn db.queryTable(fmt.Sprintf(\"%v\", query[\"table\"]), query)\n}",
"func (m *MongoDbConn) GetPrint(id string) (*Print, error) {\n\n\tdblock.Lock()\n\tdefer dblock.Unlock()\n\n\tdb := m.Sess.DB(m.name)\n\tif db == nil {\n\t\treturn nil, errors.New(\"Getting single graphic print: MongoDB descriptor empty.\")\n\t}\n\n\t// start goroutine to get a user\n\tch := make(chan *Print)\n\tgo func(id string, ch chan *Print) {\n\n\t\tp := NewPrint() // create empty print\n\n\t\terr := db.C(\"prints\").Find(bson.M{\"_id\": MongoStringToId(id)}).One(&p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tch <- p\n\t}(id, ch)\n\n\tp := <-ch\n\treturn p, nil // all OK\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func (oc *OffchainDB) ReadData(ID string) error {\n\tresults, err := oc.Conn.Query(\"SELECT Offdata FROM linkdata where ID=?\", ID)\n\tif err != nil {\n\t\tfmt.Printf(\"Something went wrong while trying to select from database.\\n%v\",err)\n\t\treturn err\n\t}\n\tdefer results.Close() \n\tfor results.Next() {\n\t\tvar links Linkdata\n\t\t// for each row, scan the result into our tag composite object\n\t\terr = results.Scan(&links.data)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Something went wrong while casting data to the composite object.\\n%v\",err)\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"DATA: %s\\n\", links.data)\n\t}\n\treturn nil\n}",
"func (d *DB) EQuery(ctx context.Context, statement string, fields []string, args ...interface{}) (connection.ResultFetch, error) {\n\ts, a, err := connection.EscapeArgs(statement, args)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"escaping arguments\")\n\t}\n\treturn d.Query(ctx, s, fields, a)\n}",
"func (m *MongoDbConn) GetPrints(srch string) ([]*Print, error) {\n\n\tdblock.Lock()\n\tdefer dblock.Unlock()\n\n\tdb := m.Sess.DB(m.name)\n\tif db == nil {\n\t\treturn nil, errors.New(\"Getting graphic prints: DB descriptor empty.\")\n\t}\n\n\t// start a new goroutine to get prints from DB\n\tch := make(chan []*Print)\n\tgo func(ch chan []*Print, qry string) {\n\n\t\t// check channel\n\t\tif ch == nil {\n\t\t\treturn\n\t\t}\n\n\t\tp := make([]*Print, 0)\n\t\tif qry == \"\" {\n\t\t\t_ = db.C(\"prints\").Find(bson.D{}).All(&p)\n\t\t} else {\n\t\t\t_ = db.C(\"prints\").Find(bson.M{\"$text\": bson.M{\"$search\": qry}}).Sort(\"work.title\").All(&p)\n\t\t}\n\t\tch <- p\n\n\t}(ch, srch)\n\n\tp := <-ch\n\treturn p, nil // OK\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
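A minimal, hypothetical usage sketch for the SQLReadEPrint row above. Only the SQLReadEPrint signature and the EPrintID/ID fields come from the document itself; loadConfig, the repository ID, base URL and record ID are illustrative placeholders, not part of the source.

// Hypothetical caller for SQLReadEPrint; loadConfig, the repoID,
// baseURL and record ID 12345 are placeholders, not part of the source.
func exampleReadOne() {
	cfg, err := loadConfig("settings.json") // assumed helper returning *Config
	if err != nil {
		log.Fatal(err)
	}
	eprint, err := SQLReadEPrint(cfg, "caltechauthors", "https://authors.library.caltech.edu", 12345)
	if err != nil {
		log.Fatalf("read failed: %s", err)
	}
	fmt.Printf("eprintid %d resolves to %s\n", eprint.EPrintID, eprint.ID)
}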
qmList generates a slice of strings where each element holds "?". | func qmList(length int) []string {
list := []string{}
for i := 0; i < length; i++ {
list = append(list, `?`)
}
return list
} | [
"func getQueries(raw []string) []string {\n\tvar res []string\n\n\tfor _, q := range raw {\n\t\tfor _, qs := range strings.Split(q, \";\") {\n\t\t\tif nq := strings.TrimSpace(qs); len(nq) > 0 {\n\t\t\t\tres = append(res, nq+\";\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res\n}",
"func (n BIP39Korean) GetList() []string {\n\treturn bip39Korean\n}",
"func (db *RDBMS) QStrArrMap(query string, params ...any) M.SAX {\n\tres := M.SAX{}\n\trows := db.QAll(query, params...)\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\trow := rows.ScanSlice()\n\t\tres[X.ToS(row[0])] = row[1:]\n\t}\n\treturn res\n}",
"func QuotedStringList(list []string) string {\n\tresult := make([]string, len(list))\n\tfor i, s := range list {\n\t\tresult[i] = fmt.Sprintf(\"%q\", s)\n\t}\n\treturn strings.Join(result, \", \")\n}",
"func FillStringArray(param string, l int) []string {\n\tfArray := make([]string, l)\n\tfor i := 0; i < l; i++ {\n\t\tfArray[i] = param\n\t}\n\treturn fArray\n}",
"func (self AgreementProtocolList) As_String_Array() []string {\n\tr := make([]string, 0, 10)\n\tfor _, e := range self {\n\t\tr = append(r, e.Name)\n\t}\n\treturn r\n}",
"func toEnglishList(elements []string) string {\n\tret := \"\"\n\tfor i, str := range elements {\n\t\tif len(elements)-i >= 3 {\n\t\t\tret += str + \", \"\n\t\t} else if len(elements)-i == 2 {\n\t\t\tret += str\n\t\t\tif len(elements) > 2 {\n\t\t\t\tret += \", \"\n\t\t\t} else {\n\t\t\t\tret += \" \"\n\t\t\t}\n\t\t} else {\n\t\t\tif len(elements) > 1 {\n\t\t\t\tret += \"and \"\n\t\t\t}\n\t\t\tret += str\n\t\t}\n\t}\n\treturn ret\n}",
"func List() []string {\n\treturn []string{\n\t\t\"en\",\n\t}\n}",
"func kaprekarNumbers(p int32, q int32) {\n // Write your code here\n var kaprekar []string = make([]string,0)\n for x:=p; x<=q; x++ {\n if _,ok := isKaprekar(x); ok {\n kaprekar = append(kaprekar,strconv.Itoa(int(x)))\n }\n }\n if len(kaprekar) == 0 {\n fmt.Println(\"INVALID RANGE\")\n } else {\n fmt.Println(strings.Join(kaprekar,\" \"))\n }\n return\n}",
"func stringFromList(l *list.List) string {\n\ta := make([]string, l.Len())\n\ti := 0\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\ta[i] = fmt.Sprintf(\"%v\", e.Value)\n\t\ti++\n\t}\n\treturn strings.Join(a, \", \")\n}",
"func TestGenerateMessageListQueryString(t *testing.T) {\n\tfor _, tt := range listQueryStringTests {\n\t\tqOptions := tt.qOptions\n\t\tjsonOpts, err := json.Marshal(qOptions)\n\t\tif len(tt.qAfter) > 0 {\n\t\t\tdt, err := time.Parse(time.RFC3339, tt.qAfter)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"gmailutil.GenerateMessageListQueryString('%s') Error: [%v]\", jsonOpts, err.Error())\n\t\t\t}\n\t\t\tqOptions.After = dt\n\t\t}\n\t\tif len(tt.qBefore) > 0 {\n\t\t\tdt, err := time.Parse(time.RFC3339, tt.qBefore)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"gmailutil.GenerateMessageListQueryString('%s') Error: [%v]\", jsonOpts, err.Error())\n\t\t\t}\n\t\t\tqOptions.Before = dt\n\t\t}\n\t\tgotString := qOptions.Encode()\n\t\tif gotString != tt.qString {\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"gmailutil.GenerateMessageListQueryString() Error: [%v]\", err.Error())\n\t\t\t}\n\t\t\tt.Errorf(\"gmailutil.GenerateMessageListQueryString('%s') Error: want [%v] got [%v]\", jsonOpts, tt.qString, gotString)\n\t\t}\n\t}\n}",
"func (b *Bool) List() []string {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\treturn []string{b.String()}\n}",
"func (c Items) mekeShopitemArray(itemlist string) []string {\n\titems := strings.Split(itemlist, \";\")\n\titemNumber := len(items) - 1\n\n\tvar list []string\n\n\tfor index, item := range items {\n\t\twords := []byte(item)\n\n\t\tif index != 0 {\n\t\t\tif len(words) > 2 {\n\t\t\t\twords = words[2:len(words)]\n\t\t\t}\n\t\t}\n\n\t\tstr := string(words)\n\t\tlist = append(list, str)\n\t}\n\n\tlist = list[:itemNumber]\n\n\treturn list\n}",
"func (s ShortLinkSQL) composeParamList(numParams int) string {\n\tparams := make([]string, 0, numParams)\n\tfor i := 0; i < numParams; i++ {\n\t\tparams = append(params, fmt.Sprintf(\"$%d\", i+1))\n\t}\n\n\tparameterStr := strings.Join(params, \", \")\n\treturn parameterStr\n}",
"func convertInterfaceToStringArrNullable(purportedList interface{}) []string {\n\tarr := convertInterfaceToStringArr(purportedList)\n\tif len(arr) < 1 {\n\t\treturn nil\n\t}\n\treturn arr\n}",
"func parseQuestions(lines [][]string) []problem {\n\tproblems := make([]problem, len(lines))\n\tfor i, line := range lines {\n\t\tproblems[i] = problem{\n\t\t\tquestion: line[0],\n\t\t\tanswer: strings.TrimSpace(line[1]),\n\t\t}\n\t}\n\treturn problems\n}",
"func (cc *cclass) list() []byte {\n\tlist := make([]byte, 0, 16)\n\tfor i := 0; i < 256; i++ {\n\t\tif cc[i>>3]&(1<<(i&7)) != 0 {\n\t\t\tlist = append(list, byte(i))\n\t\t}\n\t}\n\treturn list\n}",
"func discoverQueues(monitoredQueues string) error {\n\tvar err error\n\tvar elem *ibmmq.PCFParameter\n\tvar datalen int\n\n\tif monitoredQueues == \"\" {\n\t\treturn err\n\t}\n\n\tqueues := strings.Split(monitoredQueues, \",\")\n\tfor i := 0; i < len(queues) && err == nil; i++ {\n\t\tvar buf []byte\n\n\t\tpattern := queues[i]\n\n\t\tif strings.Count(pattern, \"*\") > 1 ||\n\t\t\t(strings.Count(pattern, \"*\") == 1 && !strings.HasSuffix(pattern, \"*\")) {\n\t\t\treturn fmt.Errorf(\"Queue pattern '%s' is not valid\", pattern)\n\t\t}\n\n\t\tputmqmd := ibmmq.NewMQMD()\n\t\tpmo := ibmmq.NewMQPMO()\n\n\t\tpmo.Options = ibmmq.MQPMO_NO_SYNCPOINT\n\t\tpmo.Options |= ibmmq.MQPMO_NEW_MSG_ID\n\t\tpmo.Options |= ibmmq.MQPMO_NEW_CORREL_ID\n\t\tpmo.Options |= ibmmq.MQPMO_FAIL_IF_QUIESCING\n\n\t\tputmqmd.Format = \"MQADMIN\"\n\t\tputmqmd.ReplyToQ = replyQObj.Name\n\t\tputmqmd.MsgType = ibmmq.MQMT_REQUEST\n\t\tputmqmd.Report = ibmmq.MQRO_PASS_DISCARD_AND_EXPIRY\n\n\t\tcfh := ibmmq.NewMQCFH()\n\n\t\t// Can allow all the other fields to default\n\t\tcfh.Command = ibmmq.MQCMD_INQUIRE_Q_NAMES\n\n\t\t// Add the parameters one at a time into a buffer\n\t\tpcfparm := new(ibmmq.PCFParameter)\n\t\tpcfparm.Type = ibmmq.MQCFT_STRING\n\t\tpcfparm.Parameter = ibmmq.MQCA_Q_NAME\n\t\tpcfparm.String = []string{pattern}\n\t\tcfh.ParameterCount++\n\t\tbuf = append(buf, pcfparm.Bytes()...)\n\n\t\tpcfparm = new(ibmmq.PCFParameter)\n\t\tpcfparm.Type = ibmmq.MQCFT_INTEGER\n\t\tpcfparm.Parameter = ibmmq.MQIA_Q_TYPE\n\t\tpcfparm.Int64Value = []int64{int64(ibmmq.MQQT_LOCAL)}\n\t\tcfh.ParameterCount++\n\t\tbuf = append(buf, pcfparm.Bytes()...)\n\n\t\t// Once we know the total number of parameters, put the\n\t\t// CFH header on the front of the buffer.\n\t\tbuf = append(cfh.Bytes(), buf...)\n\n\t\t// And put the command to the queue\n\t\terr = cmdQObj.Put(putmqmd, pmo, buf)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Now get the response\n\t\tgetmqmd := ibmmq.NewMQMD()\n\t\tgmo := ibmmq.NewMQGMO()\n\t\tgmo.Options = ibmmq.MQGMO_NO_SYNCPOINT\n\t\tgmo.Options |= ibmmq.MQGMO_FAIL_IF_QUIESCING\n\t\tgmo.Options |= ibmmq.MQGMO_WAIT\n\t\tgmo.Options |= ibmmq.MQGMO_CONVERT\n\t\tgmo.WaitInterval = 30 * 1000\n\n\t\t// Ought to add a loop here in case we get truncated data\n\t\tbuf = make([]byte, 32768)\n\n\t\tdatalen, err = replyQObj.Get(getmqmd, gmo, buf)\n\t\tif err == nil {\n\t\t\tcfh, offset := ibmmq.ReadPCFHeader(buf)\n\t\t\tif cfh.CompCode != ibmmq.MQCC_OK {\n\t\t\t\treturn fmt.Errorf(\"PCF command failed with CC %d RC %d\", cfh.CompCode, cfh.Reason)\n\t\t\t} else {\n\t\t\t\tparmAvail := true\n\t\t\t\tbytesRead := 0\n\t\t\t\tfor parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {\n\t\t\t\t\telem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])\n\t\t\t\t\toffset += bytesRead\n\t\t\t\t\t// Have we now reached the end of the message\n\t\t\t\t\tif offset >= datalen {\n\t\t\t\t\t\tparmAvail = false\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch elem.Parameter {\n\t\t\t\t\tcase ibmmq.MQCACF_Q_NAMES:\n\t\t\t\t\t\tif len(elem.String) == 0 {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"No queues matching '%s' exist\", pattern)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := 0; i < len(elem.String); i++ {\n\t\t\t\t\t\t\tqList = append(qList, strings.TrimSpace(elem.String[i]))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}",
"func ToStringList(genericList []interface{}) []string {\n\tstringList := []string{}\n\n\tfor _, value := range genericList {\n\t\tstringList = append(stringList, ToString(value))\n\t}\n\n\treturn stringList\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
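To show where qmList fits, here is a small sketch of the pattern insertItemList uses below when composing a parameterized INSERT; demo_table and its columns are invented for illustration.

// Sketch of the pattern insertItemList uses: qmList supplies the "?"
// placeholders for a parameterized INSERT. demo_table and its columns
// are invented for illustration.
func exampleInsertStmt() string {
	columns := []string{"eprintid", "pos", "creators_name_family"}
	// Produces: INSERT INTO demo_table (eprintid, pos, creators_name_family) VALUES (?, ?, ?)
	return fmt.Sprintf(`INSERT INTO demo_table (%s) VALUES (%s)`,
		strings.Join(columns, `, `),
		strings.Join(qmList(len(columns)), `, `))
}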
insertItemList takes a database handle, repoID, table name, list of columns and an EPrint data structure, then generates and executes a series of INSERT statements to create an item list for the given table. | func insertItemList(db *sql.DB, repoID string, tableName string, columns []string, eprint *EPrint) error {
var (
itemList ItemsInterface
)
eprintid := eprint.EPrintID
switch {
case strings.HasPrefix(tableName, `eprint_creators_`):
itemList = eprint.Creators
case strings.HasPrefix(tableName, `eprint_editors_`):
itemList = eprint.Editors
case strings.HasPrefix(tableName, `eprint_contributors_`):
itemList = eprint.Contributors
case strings.HasPrefix(tableName, `eprint_corp_creators`):
itemList = eprint.CorpCreators
case strings.HasPrefix(tableName, `eprint_corp_contributors_`):
itemList = eprint.CorpContributors
case strings.HasPrefix(tableName, `eprint_thesis_advisor_`):
itemList = eprint.ThesisAdvisor
case strings.HasPrefix(tableName, `eprint_thesis_committee_`):
itemList = eprint.ThesisCommittee
case strings.HasPrefix(tableName, `eprint_item_issues_`):
itemList = eprint.ItemIssues
case strings.HasPrefix(tableName, `eprint_alt_title`):
itemList = eprint.AltTitle
case strings.HasPrefix(tableName, `eprint_conductors`):
itemList = eprint.Conductors
case strings.HasPrefix(tableName, `eprint_conf_creators_`):
itemList = eprint.ConfCreators
case strings.HasPrefix(tableName, `eprint_exhibitors_`):
itemList = eprint.Exhibitors
case strings.HasPrefix(tableName, `eprint_producers_`):
itemList = eprint.Producers
case strings.HasPrefix(tableName, `eprint_lyricists_`):
itemList = eprint.Lyricists
case strings.HasPrefix(tableName, `eprint_accompaniment`):
itemList = eprint.Accompaniment
case strings.HasPrefix(tableName, `eprint_subjec`):
itemList = eprint.Subjects
case strings.HasPrefix(tableName, `eprint_local_`):
itemList = eprint.LocalGroup
case strings.HasPrefix(tableName, `eprint_div`):
itemList = eprint.Divisions
case strings.HasPrefix(tableName, `eprint_option_maj`):
itemList = eprint.OptionMajor
case strings.HasPrefix(tableName, `eprint_option_min`):
itemList = eprint.OptionMinor
case strings.HasPrefix(tableName, `eprint_funders_`):
itemList = eprint.Funders
case strings.HasPrefix(tableName, `eprint_funders`):
// Ignore, eprint_funders is empty in CaltechAUTHORS ...
itemList = new(FunderItemList)
case strings.HasPrefix(tableName, `eprint_other_numbering_system`):
itemList = eprint.OtherNumberingSystem
case strings.HasPrefix(tableName, `eprint_projects`):
itemList = eprint.Projects
case strings.HasPrefix(tableName, `eprint_referencetext`):
itemList = eprint.ReferenceText
case strings.HasPrefix(tableName, `eprint_related_url`):
itemList = eprint.RelatedURL
case strings.HasPrefix(tableName, `eprint_skill_areas`):
itemList = eprint.SkillAreas
case strings.HasPrefix(tableName, `eprint_patent_assignee`):
itemList = eprint.PatentAssignee
case strings.HasPrefix(tableName, `eprint_related_patents`):
itemList = eprint.RelatedPatents
case strings.HasPrefix(tableName, `eprint_reference`):
itemList = eprint.Reference
case strings.HasPrefix(tableName, `eprint_copyright_holders`):
itemList = eprint.CopyrightHolders
case strings.HasPrefix(tableName, `eprint_related_patent`):
itemList = eprint.RelatedPatents
	case strings.HasPrefix(tableName, `eprint_parent_assign`):
		// NOTE: "parent_assign" looks like a misspelling of "patent_assign";
		// kept as-is, since eprint_patent_assignee is already matched above.
		itemList = eprint.PatentAssignee
case strings.HasPrefix(tableName, `eprint_skill`):
itemList = eprint.SkillAreas
	case strings.HasPrefix(tableName, `eprint_relation`):
		// NOTE: This is not the same as document_relation_*, it is a separate item list.
		// It has the same structure, with a uri and type. Our eprint implementations use a
		// Relation item list here.
		itemList = eprint.Relation
	case strings.HasPrefix(tableName, `eprint_keyword`):
		// NOTE: we appear to use the longtext keywords field in the eprint table. Not sure if this
		// is a new or old structure. It is possible that our longtext for keywords is a legacy structure.
		// itemList = eprint.Keyword
default:
return fmt.Errorf(`do not understand table %q, columns %s`, tableName, strings.Join(columns, `, `))
}
// Clear the list, then insert
stmt := fmt.Sprintf(`DELETE FROM %s WHERE eprintid = ?`, tableName)
_, err := db.Exec(stmt, eprint.EPrintID)
if err != nil {
return fmt.Errorf(`SQL error, %q, %s`, stmt, err)
}
for pos := 0; pos < itemList.Length(); pos++ {
item := itemList.IndexOf(pos)
item.Pos = pos
values := []interface{}{}
columnsSQL := []string{}
for _, col := range columns {
switch {
case col == `eprintid`:
values = append(values, eprintid)
columnsSQL = append(columnsSQL, col)
case col == `pos`:
values = append(values, pos)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_id`):
values = append(values, item.ID)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_type`):
values = append(values, item.Type)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_family`):
values = append(values, item.Name.Family)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_given`):
values = append(values, item.Name.Given)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_honourific`):
values = append(values, item.Name.Honourific)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_lineage`):
values = append(values, item.Name.Lineage)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_name`):
values = append(values, item.Name.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_show_email`):
// NOTE: _show_email needs to be tested before _email
values = append(values, item.ShowEMail)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_email`):
// NOTE: _show_email needs to be tested before _email
values = append(values, item.EMail)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_role`):
values = append(values, item.Role)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_url`):
values = append(values, item.URL)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `description`):
values = append(values, item.Description)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_agency`):
values = append(values, item.Agency)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_grant_number`):
values = append(values, item.GrantNumber)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_uri`):
values = append(values, item.URI)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_orcid`):
values = append(values, item.ORCID)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_ror`):
values = append(values, item.ROR)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_timestamp`):
values = append(values, item.Timestamp)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_status`):
values = append(values, item.Status)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_reported_by`):
values = append(values, item.ReportedBy)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_resolved_by`):
values = append(values, item.ResolvedBy)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_comment`):
				// NOTE: mirrors the original source, which reuses ResolvedBy here; if the
				// item type has a separate comment field, that is likely the intended value.
				values = append(values, item.ResolvedBy)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_group`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_subjects`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_major`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_minor`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `_holders`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `divisions`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `subjects`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `referencetext`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `accompaniment`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `related_patents`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `patent_assignee`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `skill_areas`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
case strings.HasSuffix(col, `alt_title`):
values = append(values, item.Value)
columnsSQL = append(columnsSQL, col)
default:
return fmt.Errorf("do not understand column %s.%s", tableName, col)
}
}
stmt := fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, tableName, strings.Join(columnsSQL, `, `), strings.Join(qmList(len(columnsSQL)), `, `))
_, err := db.Exec(stmt, values...)
if err != nil {
return fmt.Errorf(`SQL error, %q, %s`, stmt, err)
}
}
return nil
} | [
"func buildItems(db *sql.DB) {\n\tstm := `CREATE TABLE IF NOT EXISTS items (\n\t\titem_id SERIAL PRIMARY KEY,\n\t\tname varchar(80),\n\t\tprice INT\n\t\t);`\n\t_, err := db.Exec(stm)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func InsertItem(offers []model.Offer) bool {\n\tdb, err := sql.Open(\"sqlite3\", dbPath)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn false\n\t}\n\tdefer db.Close()\n\n\tstmt, err := db.Prepare(\"INSERT INTO items(itemID, url, name, image, price, description, createdAt) values(?,?,?,?,?,?,?)\")\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn false\n\t}\n\n\tfor _, offer := range offers {\n\t\t_, err := stmt.Exec(offer.ID, offer.URL, offer.Name, offer.Image, offer.Price, offer.Description, int32(time.Now().Unix()))\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func insertItemIntoCart(username, pid string, qty int) {\n\n\tvar timeNow string = time.Now().Format(\"2006-01-02 15:04:05\")\n\n\tstmt, err := db.Prepare(\"insert into cartDB(un, pid, qty, time) values (?,?,?,?)\")\n\n\tcheck(err)\n\tdefer stmt.Close()\n\n\tr, err := stmt.Exec(username, pid, qty, timeNow)\n\n\tcheck(err)\n\n\tn, err := r.RowsAffected()\n\t_ = n\n\tcheck(err)\n\n}",
"func (i *ItemDB) Insert() *response.Error {\n\terr := stmtMap[\"itemInsert\"].QueryRow(i.HuntID, i.Name, i.Points).Scan(&i.ID)\n\tif err != nil {\n\t\treturn response.NewErrorf(http.StatusInternalServerError, \"error inserting item: %s\", err.Error())\n\t}\n\n\treturn nil\n}",
"func InsertItem(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvar item Item\n\tif err := json.NewDecoder(r.Body).Decode(&item); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\n\t\treturn\n\t} else if check := ItemFildValidation(item); check != \"ok\" {\n\t\trespondWithError(w, http.StatusBadRequest, check)\n\t\treturn\n\t}\n\tvar e BaseRepository = item\n\tif err := e.Insert(); err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\trespondWithJson(w, http.StatusCreated, item)\n}",
"func (lst Listing) Insert() (int64, error) {\n\t// get the statement from the config\n\tquery := Config.SQLQueries[\"create_listing\"]\n\t// prepare the arguments using the lst paramater\n\targ := []interface{}{lst.Title, lst.Description, lst.ISBN, lst.Price, lst.Category, lst.SellerName, lst.ListingPassword, lst.Status}\n\t// prepare the statement\n\t//stmt, _ := db.Prepare(query)\n\tlog.Println(arg)\n\n\t// execute the statement with the args in the array\n\tres, err := db.Exec(query, arg...)\n\t// error check\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res.LastInsertId()\n}",
"func InsertDB(m ...*Model) error {\n\n\tdb, err := sql.Open(\"sqlite3\", DB_NAME)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn err\n\t}\n\tdefer db.Close()\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn err\n\t}\n\n\tcommand := m[0].GenInsertInto()\n\tlog.Println(command)\n\tstmt, err := tx.Prepare(command)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor _, v := range m {\n\t\tlog.Println(\"Inserting...\", v.String())\n\n\t\t_, err = stmt.Exec(v.GenValues()...)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn err\n\t\t}\n\t\tlog.Println(v.String(), \" inserted!\")\n\t}\n\n\ttx.Commit()\n\treturn nil\n}",
"func (e *executor) Insert(list ...interface{}) error {\n\tvar err error\n\n\ttypes := types(list)\n\tquery := fmt.Sprintf(\"MULTI-INSERT <objects:%v>\", types)\n\n\tjgorp.Trace(e.ctx, e.name, query, func() error {\n\t\terr = e.inner.Insert(list...)\n\t\treturn err\n\t})\n\n\treturn err\n}",
"func (db db) bulkInsertItems(items []item) error {\n\tbulkRequest := db.client.Bulk()\n\tfor i, item := range items {\n\t\treq := elastic.NewBulkIndexRequest().Index(db.index).Type(\"item\").Id(strconv.Itoa(i)).Doc(item)\n\t\tbulkRequest = bulkRequest.Add(req)\n\t}\n\tbulkResponse, err := bulkRequest.Do(context.Background())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bulkInsertItems: couldn't do bulk insert: %v\", err)\n\t}\n\tif bulkResponse != nil && bulkResponse.Errors {\n\t\treturn fmt.Errorf(\"bulkInsertItems: bulk insert had errors\")\n\t}\n\tif _, err := db.client.Refresh(db.index).Do(context.Background()); err != nil { // force instantly searchable\n\t\treturn fmt.Errorf(\"bulkInsertItems: index refresh had error: %v\", err)\n\t}\n\treturn nil\n}",
"func (p *postgres) CreateItem(value, iType, version string) (int64, error) {\n\tq := `INSERT INTO conf_item\n\t(conf_item_value, conf_item_type, conf_item_version)\n\tVALUES ($1, $2, $3) RETURNING conf_item_id`\n\n\treturn create(p.db, q, \"Item\", value, iType, version)\n}",
"func createStmInsert(iVal interface{}, tableName string) string {\n\t// iVal := &aldoutil.Product{}\n\tvar dbFieldNameS []string\n\tvar dbFieldNameColonS []string\n\n\tif tableName == \"\" {\n\t\tif t := reflect.TypeOf(iVal); t.Kind() == reflect.Ptr {\n\t\t\ttableName = strings.ToLower(t.Elem().Name())\n\t\t} else {\n\t\t\ttableName = strings.ToLower(t.Name())\n\t\t}\n\t}\n\n\tval := reflect.ValueOf(iVal).Elem()\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tfieldType := val.Type().Field(i)\n\t\tdbFieldName := fieldType.Tag.Get(\"db\")\n\t\tdbFieldNameS = append(dbFieldNameS, dbFieldName)\n\t\tdbFieldNameColonS = append(dbFieldNameColonS, \":\"+dbFieldName)\n\t}\n\treturn \"INSERT INTO \" + tableName + \" (\" + strings.Join(dbFieldNameS, \", \") + \") VALUES (\" + strings.Join(dbFieldNameColonS, \", \") + \")\"\n}",
"func insertItem(items []Item, item Item, sortKey *KeyPartDef) []Item {\n\tif sortKey == nil {\n\t\treturn append(items, item)\n\t}\n\tless := lessFunc(items, item, sortKey)\n\ti := sort.Search(len(items), less)\n\t// insert at index i\n\treturn append(items[:i], append([]Item{item}, items[i:]...)...)\n}",
"func MakeInsertTableSQL(entitiesData _struct.FIELDVALUEAttributes) (cmd string) {\n\tvar cmdHead = \"INSERT INTO \" + entitiesData.Table\n\tvar cmdcol= \"\"\n\tvar cmdval = \"\"\n\tvar csplit = \"=\"\n\tlog.Debug(\"fieldvalue\")\n\tlog.Debug(entitiesData.FieldValue)\n\tfor _, fv := range entitiesData.FieldValue {\n\t\tlog.Debug(\"fv\")\n\t\tlog.Debug(fv) \n\t\t//var par = stings.Split(fv,csplit)\n\t\tvar par = SplitPars(fv,csplit)\n\t\tlog.Debug(par) \n\t\tif(len(par) > 1) {\n\t\t\tif(len(cmdcol)>0) { \t\t\t\t \n\t\t\t\tcmdcol = cmdcol+ \" , \"\n\t\t\t}\n\t\t\tif(len(cmdval)>0) { \t\t\t\t \n\t\t\t\tcmdval = cmdval + \" , \"\n\t\t\t}\n\t\t\tcmdcol = cmdcol + par[0]\n\t\t cmdval = cmdval + par[1]\n }\n\t}\n \n\tvar id= guuid.New().String()\t\n\tcmd = cmdHead + \"(ID,\" + cmdcol +\") VALUES ('\"+id+\"',\"+ cmdval+ \")\"\n return\n}",
"func (gb *graphicBuilder) putItemsInSlice(row *int, depth int, items []SequenceItem) {\n\tfor _, item := range items {\n\t\tswitch itemDetails := item.(type) {\n\t\tcase *Action:\n\t\t\tgb.putAction(*row, itemDetails)\n\t\tcase *Note:\n\t\t\tgb.putNote(*row, itemDetails)\n\t\tcase *Divider:\n\t\t\tgb.putDivider(*row, itemDetails)\n\t\tcase *Block:\n\t\t\tgb.putBlock(row, depth, itemDetails)\n\t\t}\n\n\t\t*row += 1\n\t}\n}",
"func (t *Tree) Insert(items ...Item) error {\n\n\tif len(items) > 1 {\n\t\t// sort before insert makes insertion much faster, no or less parent-child-relinking needed.\n\t\tsort.Slice(items, func(i, j int) bool { return items[i].Block.Compare(items[j].Block) < 0 })\n\t}\n\n\tfor i := range items {\n\t\tif err := t.Root.insertItem(items[i]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (b *BadgerStore) insert(list []*badgerVertex) error {\n\ttxn := b.db.NewTransaction(true)\n\tdefer txn.Discard()\n\n\tfor i := range list {\n\t\tdata, err := json.Marshal(list[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := txn.Set([]byte(list[i].ID), data); err != nil {\n\t\t\tif err != badger.ErrTxnTooBig {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := txn.Commit(nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttxn = b.db.NewTransaction(true)\n\t\t\tif err := txn.Set([]byte(list[i].ID), data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Commit the transaction and check for error.\n\tif err := txn.Commit(nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (dr *DomRepeat) ItemsInserted(row, count int) {\n\tif dr.Delegate == \"\" {\n\t\tpanic(\"DomRepeat Delegate is not set\")\n\t}\n\tlist := reflect.ValueOf(dr.Items)\n\tfor i := 0; i < count; i++ {\n\t\titem := list.Index(row + i).Interface()\n\t\tdomItem := js.Global.Get(\"document\").Call(\"createElement\", dr.Delegate)\n\t\tdomItem.Get(\"__internal_object__\").Set(dr.ItemAs, item)\n\t\tshadowRoot := dr.Get(\"shadowRoot\")\n\t\tif shadowRoot.Get(\"children\").Length() == 0 {\n\t\t\tshadowRoot.Call(\"appendChild\", domItem)\n\t\t\tcontinue\n\t\t}\n\t\t//https://stackoverflow.com/questions/4793604/how-to-do-insert-after-in-javascript-without-using-a-library\n\t\treferenceNode := shadowRoot.Get(\"children\").Index(row + i - 1)\n\t\treferenceNode.Get(\"parentNode\").Call(\"insertBefore\", domItem, referenceNode.Get(\"nextSibling\"))\n\t}\n}",
"func ListItemsCommand(db CommandDB, table string, limit int64) *Command {\n\treturn &Command{\n\t\tID: CmdListItems,\n\t\tDB: db,\n\t\tStrArgs: []string{table},\n\t\tIntArg: limit,\n\t}\n}",
"func StoreItem(params *DBCommunication) error {\n\tsqlAdditem := `\n\tINSERT OR REPLACE INTO Game(\n\t\tId,\n\t\tName,\n\t\tBestScore,\n\t\tCurrScore,\n\t\tBoard,\n\t\tActX,\n\t\tActY,\n\t\tNextColors\n\t) values(?, ?, ?, ?, ?, ?, ?, ?)\n\t`\n\tstmt, err := database.Prepare(sqlAdditem)\n\tif err != nil {\n\t\treturn errors.New(\"Failed to prepare DB for write\")\n\t}\n\t_, err = stmt.Exec(params.id, params.username, params.bestScore, params.score, encodeTable(params.table), params.active.x, params.active.y, encodeTable(params.nextColors))\n\tif err != nil {\n\t\treturn errors.New(\"Failed to write to DB\")\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
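The INSERT and REPLACE statements in the listings above and below build their VALUES clause with a helper called qmList that is not shown in this section. A minimal sketch of what such a helper presumably looks like, inferred only from how it is called (qmList(len(columnsSQL)) joined with ", "), follows; the body here is an assumption, not the repository's actual source.

func qmList(n int) []string {
// Return n SQL positional placeholders ("?") so the caller can join
// them with ", " to mirror the column list of an INSERT or REPLACE.
list := make([]string, n)
for i := range list {
list[i] = `?`
}
return list
}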
SQLCreateEPrint will read an EPrint structure and generate the SQL INSERT, REPLACE and DELETE statements needed to create a new EPrint record in the repository. | func SQLCreateEPrint(config *Config, repoID string, ds *DataSource, eprint *EPrint) (int, error) {
var (
err error
)
db, ok := config.Connections[repoID]
if !ok {
return 0, fmt.Errorf(`no database connection for %s`, repoID)
}
// If the eprint id is zero, generate a sequence of INSERT statements
// for the record. Otherwise generate the appropriate History
// records, then delete and re-insert the record.
tableName := `eprint`
if columns, ok := ds.TableMap[tableName]; ok {
// Generate an empty row and capture the id created.
stmt := `INSERT INTO eprint (eprintid) (SELECT (IFNULL((SELECT eprintid FROM eprint ORDER BY eprintid DESC LIMIT 1), 0) + 1) AS eprintid)`
_, err := db.Exec(stmt)
if err != nil {
return 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)
}
stmt = `SELECT eprintid FROM eprint ORDER BY eprintid DESC LIMIT 1`
rows, err := db.Query(stmt)
if err != nil {
return 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)
}
id := 0
for rows.Next() {
if err := rows.Scan(&id); err != nil {
return 0, fmt.Errorf(`could not calculate the new eprintid value, %s`, err)
}
}
rows.Close()
if err := rows.Err(); err != nil {
return 0, fmt.Errorf(`SQL failed to get insert id, %s`, err)
}
eprint.EPrintID = int(id)
eprint.Dir = makeDirValue(eprint.EPrintID)
// FIXME: decide if this is automatic or if this should be
// passed in with the data structure.
// Generate minimal date and time stamps
now := time.Now()
if eprint.Datestamp == "" {
eprint.Datestamp = now.Format(timestamp)
eprint.DatestampYear = now.Year()
eprint.DatestampMonth = int(now.Month())
eprint.DatestampDay = now.Day()
eprint.DatestampHour = now.Hour()
eprint.DatestampMinute = now.Minute()
eprint.DatestampSecond = now.Second()
} else if dt, err := time.Parse(datestamp, eprint.Datestamp); err == nil {
eprint.DatestampYear = dt.Year()
eprint.DatestampMonth = int(dt.Month())
eprint.DatestampDay = dt.Day()
} else if dt, err := time.Parse(timestamp, eprint.Datestamp); err == nil {
eprint.DatestampYear = dt.Year()
eprint.DatestampMonth = int(dt.Month())
eprint.DatestampDay = dt.Day()
eprint.DatestampHour = dt.Hour()
eprint.DatestampMinute = dt.Minute()
eprint.DatestampSecond = dt.Second()
}
eprint.LastModified = now.Format(timestamp)
eprint.LastModifiedYear = now.Year()
eprint.LastModifiedMonth = int(now.Month())
eprint.LastModifiedDay = now.Day()
eprint.LastModifiedHour = now.Hour()
eprint.LastModifiedMinute = now.Minute()
eprint.LastModifiedSecond = now.Second()
eprint.StatusChanged = now.Format(timestamp)
eprint.StatusChangedYear = now.Year()
eprint.StatusChangedMonth = int(now.Month())
eprint.StatusChangedDay = now.Day()
eprint.StatusChangedHour = now.Hour()
eprint.StatusChangedMinute = now.Minute()
eprint.StatusChangedSecond = now.Second()
if eprint.Date != "" {
eprint.DateYear, eprint.DateMonth, eprint.DateDay = approxYMD(eprint.Date)
}
if eprint.ThesisSubmittedDate != "" {
eprint.ThesisSubmittedDateYear, eprint.ThesisSubmittedDateMonth, eprint.ThesisSubmittedDateDay = approxYMD(eprint.ThesisSubmittedDate)
}
if eprint.ThesisDefenseDate != "" {
eprint.ThesisDefenseDateYear, eprint.ThesisDefenseDateMonth, eprint.ThesisDefenseDateDay = approxYMD(eprint.ThesisDefenseDate)
}
if eprint.ThesisApprovedDate != "" {
eprint.ThesisApprovedDateYear, eprint.ThesisApprovedDateMonth, eprint.ThesisApprovedDateDay = approxYMD(eprint.ThesisApprovedDate)
}
if eprint.ThesisPublicDate != "" {
eprint.ThesisPublicDateYear, eprint.ThesisPublicDateMonth, eprint.ThesisPublicDateDay = approxYMD(eprint.ThesisPublicDate)
}
if eprint.GradOfficeApprovalDate != "" {
eprint.GradOfficeApprovalDateYear, eprint.GradOfficeApprovalDateMonth, eprint.GradOfficeApprovalDateDay = approxYMD(eprint.GradOfficeApprovalDate)
}
// Step two, write the rest of the data into the main table.
columnsSQL, values := eprintToColumnsAndValues(eprint, columns, false)
stmt = fmt.Sprintf(`REPLACE INTO %s (%s) VALUES (%s)`,
tableName,
strings.Join(columnsSQL, `, `),
strings.Join(qmList(len(columnsSQL)), `, `))
_, err = db.Exec(stmt, values...)
if err != nil {
return 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)
}
}
if eprint.EPrintID != 0 {
for tableName, columns := range ds.TableMap {
// Handle the remaining tables, i.e. skip eprint table.
switch {
case tableName == `eprint`:
// Skip eprint table, we've already processed it
case tableName == `eprint_keyword`:
// Skip eprint_keyword, our EPrints use keywords (longtext) in eprint table.
case strings.HasPrefix(tableName, `document`):
//log.Printf(`FIXME %s columns: %s`, tableName, strings.Join(columns, `, `))
case strings.HasPrefix(tableName, `file`):
//log.Printf(`FIXME %s columns: %s`, tableName, strings.Join(columns, `, `))
default:
// Insert new rows in associated table
if err := insertItemList(db, repoID, tableName, columns, eprint); err != nil {
return eprint.EPrintID, fmt.Errorf(`failed to insert eprintid %d in table %s for %s, %s`, eprint.EPrintID, tableName, repoID, err)
}
}
}
return eprint.EPrintID, nil
}
err = fmt.Errorf(`failed to create eprint record in %s`, repoID)
return 0, err
} | [
"func SQLReadEPrint(config *Config, repoID string, baseURL string, eprintID int) (*EPrint, error) {\n\tvar (\n\t\ttables map[string][]string\n\t\tcolumns []string\n\t)\n\tif eprintID == 0 {\n\t\treturn nil, fmt.Errorf(\"not found, %d not in %q\", eprintID, repoID)\n\t}\n\t_, ok := config.Repositories[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not found, %q not defined\", repoID)\n\t}\n\ttables = config.Repositories[repoID].TableMap\n\tcolumns, ok = tables[\"eprint\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not found, %q eprint table not defined\", repoID)\n\t}\n\tdb, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no database connection for %s\", repoID)\n\t}\n\n\t// NOTE: since the specific subset of columns in a repository\n\t// are known only at run time we need to setup a generic pointer\n\t// array for the scan results based on our newly allocated\n\t// EPrint struct.\n\n\teprint := new(EPrint) // Generate an empty EPrint struct\n\teprint.EPrintID = eprintID\n\n\t// NOTE: The data is littered with NULLs in EPrints. We need to\n\t// generate both a map of values into the EPrint stucture and\n\t// aggregated the SQL Column definitions to deal with the NULL\n\t// values.\n\tcolumnSQL, values := eprintToColumnsAndValues(eprint, columns, true)\n\n\t// NOTE: With the \"values\" pointer array setup the query can be built\n\t// and executed in the usually SQL fashion.\n\tstmt := fmt.Sprintf(`SELECT %s FROM eprint WHERE eprintid = ? LIMIT 1`, strings.Join(columnSQL, `, `))\n\trows, err := db.Query(stmt, eprintID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`ERROR: query error (%q, %q), %s`, repoID, stmt, err)\n\t}\n\tcnt := 0\n\tfor rows.Next() {\n\t\t// NOTE: Because values array holds the addresses into our\n\t\t// EPrint struct the \"Scan\" does the actual mapping.\n\t\t// This makes it sorta \"auto-magical\"\n\t\tif err := rows.Scan(values...); err != nil {\n\t\t\tlog.Printf(`%s.eprint eprintid = %d, %s`, repoID, eprintID, err)\n\t\t}\n\t\tcnt++\n\t}\n\trows.Close()\n\t// NOTE: need to handle zero rows returned!\n\tif cnt > 0 {\n\t\t// Normalize fields inferred from MySQL database tables.\n\t\teprint.ID = fmt.Sprintf(`%s/id/eprint/%d`, baseURL, eprint.EPrintID)\n\t\teprint.LastModified = makeTimestamp(eprint.LastModifiedYear, eprint.LastModifiedMonth, eprint.LastModifiedDay, eprint.LastModifiedHour, eprint.LastModifiedMinute, eprint.LastModifiedSecond)\n\t\t// NOTE: EPrint XML uses a datestamp for output but tracks a timestamp.\n\t\teprint.Datestamp = makeTimestamp(eprint.DatestampYear, eprint.DatestampMonth, eprint.DatestampDay, eprint.DatestampHour, eprint.DatestampMinute, eprint.DatestampSecond)\n\t\teprint.StatusChanged = makeTimestamp(eprint.StatusChangedYear, eprint.StatusChangedMonth, eprint.StatusChangedDay, eprint.StatusChangedHour, eprint.StatusChangedMinute, eprint.StatusChangedSecond)\n\t\teprint.Date = makeApproxDate(eprint.DateYear, eprint.DateMonth, eprint.DateDay)\n\n\t\t// FIXME: Add Depository info (eprint.userid -> user* tables)\n\t\t// deposited on, deposited by\n\t\tif eprint.UserID > 0 {\n\t\t\teprint.DepositedBy = userIDToName(repoID, eprint.UserID, db)\n\t\t\teprint.DepositedOn = makeTimestamp(eprint.DatestampYear, eprint.DatestampMonth, eprint.DatestampDay, eprint.DatestampHour, eprint.DatestampMinute, eprint.DatestampSecond)\n\t\t}\n\n\t\t// Used in CaltechTHESIS\n\t\teprint.ThesisSubmittedDate = makeDatestamp(eprint.ThesisSubmittedDateYear, eprint.ThesisSubmittedDateMonth, 
eprint.ThesisSubmittedDateDay)\n\t\teprint.ThesisDefenseDate = makeDatestamp(eprint.ThesisDefenseDateYear, eprint.ThesisDefenseDateMonth, eprint.ThesisDefenseDateDay)\n\t\teprint.ThesisApprovedDate = makeDatestamp(eprint.ThesisApprovedDateYear, eprint.ThesisApprovedDateMonth, eprint.ThesisApprovedDateDay)\n\t\teprint.ThesisPublicDate = makeDatestamp(eprint.ThesisPublicDateYear, eprint.ThesisPublicDateMonth, eprint.ThesisPublicDateDay)\n\t\teprint.ThesisDegreeDate = makeDatestamp(eprint.ThesisDegreeDateYear, eprint.ThesisDegreeDateMonth, eprint.ThesisDegreeDateDay)\n\t\teprint.GradOfficeApprovalDate = makeDatestamp(eprint.GradOfficeApprovalDateYear, eprint.GradOfficeApprovalDateMonth, eprint.GradOfficeApprovalDateDay)\n\n\t\t// CreatorsItemList\n\t\teprint.Creators = eprintIDToCreators(repoID, eprintID, db, tables)\n\t\t// EditorsItemList\n\t\teprint.Editors = eprintIDToEditors(repoID, eprintID, db, tables)\n\t\t// ContributorsItemList\n\t\teprint.Contributors = eprintIDToContributors(repoID, eprintID, db, tables)\n\n\t\t// CorpCreators\n\t\teprint.CorpCreators = eprintIDToCorpCreators(repoID, eprintID, db, tables)\n\t\t// CorpContributors\n\t\teprint.CorpContributors = eprintIDToCorpContributors(repoID, eprintID, db, tables)\n\n\t\t// LocalGroupItemList (SimpleItemList)\n\t\teprint.LocalGroup = eprintIDToLocalGroup(repoID, eprintID, db, tables)\n\t\t// FundersItemList (custom)\n\t\teprint.Funders = eprintIDToFunders(repoID, eprintID, db, tables)\n\t\t// Documents (*DocumentList)\n\t\teprint.Documents = eprintIDToDocumentList(repoID, baseURL, eprintID, db, tables)\n\t\t// RelatedURLs List\n\t\teprint.RelatedURL = eprintIDToRelatedURL(repoID, baseURL, eprintID, db, tables)\n\t\t// ReferenceText (item list)\n\t\teprint.ReferenceText = eprintIDToReferenceText(repoID, eprintID, db, tables)\n\t\t// Projects\n\t\teprint.Projects = eprintIDToProjects(repoID, eprintID, db, tables)\n\t\t// OtherNumberingSystem (item list)\n\t\teprint.OtherNumberingSystem = eprintIDToOtherNumberingSystem(repoID, eprintID, db, tables)\n\t\t// Subjects List\n\t\teprint.Subjects = eprintIDToSubjects(repoID, eprintID, db, tables)\n\t\t// ItemIssues\n\t\teprint.ItemIssues = eprintIDToItemIssues(repoID, eprintID, db, tables)\n\n\t\t// Exhibitors\n\t\teprint.Exhibitors = eprintIDToExhibitors(repoID, eprintID, db, tables)\n\t\t// Producers\n\t\teprint.Producers = eprintIDToProducers(repoID, eprintID, db, tables)\n\t\t// Conductors\n\t\teprint.Conductors = eprintIDToConductors(repoID, eprintID, db, tables)\n\n\t\t// Lyricists\n\t\teprint.Lyricists = eprintIDToLyricists(repoID, eprintID, db, tables)\n\n\t\t// Accompaniment\n\t\teprint.Accompaniment = eprintIDToAccompaniment(repoID, eprintID, db, tables)\n\t\t// SkillAreas\n\t\teprint.SkillAreas = eprintIDToSkillAreas(repoID, eprintID, db, tables)\n\t\t// CopyrightHolders\n\t\teprint.CopyrightHolders = eprintIDToCopyrightHolders(repoID, eprintID, db, tables)\n\t\t// Reference\n\t\teprint.Reference = eprintIDToReference(repoID, eprintID, db, tables)\n\n\t\t// ConfCreators\n\t\teprint.ConfCreators = eprintIDToConfCreators(repoID, eprintID, db, tables)\n\t\t// AltTitle\n\t\teprint.AltTitle = eprintIDToAltTitle(repoID, eprintID, db, tables)\n\t\t// PatentAssignee\n\t\teprint.PatentAssignee = eprintIDToPatentAssignee(repoID, eprintID, db, tables)\n\t\t// RelatedPatents\n\t\teprint.RelatedPatents = eprintIDToRelatedPatents(repoID, eprintID, db, tables)\n\t\t// Divisions\n\t\teprint.Divisions = eprintIDToDivisions(repoID, eprintID, db, tables)\n\t\t// 
ThesisAdvisor\n\t\teprint.ThesisAdvisor = eprintIDToThesisAdvisors(repoID, eprintID, db, tables)\n\t\t// ThesisCommittee\n\t\teprint.ThesisCommittee = eprintIDToThesisCommittee(repoID, eprintID, db, tables)\n\n\t\t// OptionMajor\n\t\teprint.OptionMajor = eprintIDToOptionMajor(repoID, eprintID, db, tables)\n\t\t// OptionMinor\n\t\teprint.OptionMinor = eprintIDToOptionMinor(repoID, eprintID, db, tables)\n\n\t\t/*************************************************************\n\t\t NOTE: These are notes about possible original implementation\n\t\t errors or elements that did not survive the upgrade to\n\t\t EPrints 3.3.16\n\n\t\t eprint.LearningLevels (not an item list in EPrints) using LearningLevelText\n\t\t GScholar, skipping not an item list, a 2010 plugin for EPRints 3.2.\n\t\t eprint.GScholar = eprintIDToGScholar(repoID, eprintID, db, tables)\n\t\t Shelves, a plugin, not replicating, not an item list\n\t\t eprint.Shelves = eprintIDToSchelves(repoID, eprintID, db, tables)\n\t\t eprint.PatentClassification is not not an item list, using eprint.PatentClassificationText\n\t\t eprint.OtherURL appears to be an extraneous\n\t\t eprint.CorpContributors apears to be an extraneous\n\t\t*************************************************************/\n\t} else {\n\t\treturn nil, fmt.Errorf(\"not found\")\n\t}\n\n\treturn eprint, nil\n}",
"func CreateEbook(e *ebook) error {\n\tq := `INSERT INTO \n\t\t\t\t\tebooks (name, date, study)\n\t\t\t\t\tVALUES ($1, $2, $3)`\n\n\tdb := getConnection()\n\tdefer db.Close()\n\n\tstmt, err := db.Prepare(q)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tr, err := stmt.Exec(e.name, e.date, e.study)\n\tif err != nil {\n\t\treturn err\n\t}\n\ti, _ := r.RowsAffected()\n\tif i != 1 {\n\t\treturn errors.New(\"We were specting 1 row afected\")\n\n\t}\n\treturn nil\n\n}",
"func ImportEPrints(config *Config, repoID string, ds *DataSource, eprints *EPrints) ([]int, error) {\n\tvar importErrors error\n\tids := []int{}\n\n\tif config.Connections == nil {\n\t\treturn nil, fmt.Errorf(`no databases are not configured`)\n\t}\n\t_, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(`%s database connection not configured`, repoID)\n\t}\n\n\t// Check to make sure updates are allowed if non-Zero\n\t// eprint ids present.\n\tfor _, eprint := range eprints.EPrint {\n\t\tif eprint.EPrintID != 0 {\n\t\t\treturn nil, fmt.Errorf(\"create failed eprint id %d in %s\", eprint.EPrintID, repoID)\n\t\t}\n\t\tif eprint.Collection == \"\" && ds.DefaultCollection != \"\" {\n\t\t\teprint.Collection = DefaultCollection\n\t\t}\n\t\tif eprint.IDNumber == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.IDNumber = GenerateIDNumber(eprint)\n\t\t}\n\t\tif eprint.OfficialURL == \"\" && ds.DefaultOfficialURL != \"\" {\n\t\t\teprint.OfficialURL = GenerateOfficialURL(eprint)\n\t\t}\n\t\tif eprint.Rights == \"\" && ds.DefaultRights != \"\" {\n\t\t\teprint.Rights = ds.DefaultRights\n\t\t}\n\t\tif eprint.Refereed == \"\" && eprint.Type == \"article\" &&\n\t\t\tds.DefaultRefereed != \"\" {\n\t\t\teprint.Refereed = ds.DefaultRefereed\n\t\t}\n\t\tif eprint.EPrintStatus == \"\" && ds.DefaultStatus != \"\" {\n\t\t\teprint.EPrintStatus = ds.DefaultStatus\n\t\t}\n\t\tif eprint.Abstract != \"\" && ds.StripTags {\n\t\t\tif cleaner.HasEncodedElements([]byte(eprint.Abstract)) {\n\t\t\t\teprint.Abstract = string(cleaner.StripTags([]byte(eprint.Abstract)))\n\t\t\t}\n\t\t}\n\t}\n\tfor _, eprint := range eprints.EPrint {\n\t\tid, err := SQLCreateEPrint(config, repoID, ds, eprint)\n\t\tif err != nil {\n\t\t\tif importErrors == nil {\n\t\t\t\timportErrors = err\n\t\t\t} else {\n\t\t\t\timportErrors = fmt.Errorf(\"%s; %s\", importErrors, err)\n\t\t\t}\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\treturn ids, importErrors\n}",
"func (r *PartnerDB) Create(e *entity.Partner) error {\n\tstmt, err := r.db.Prepare(`insert into partner (partner_name, created_at) values(?,?)`)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = stmt.Exec(\n\t\te.PartnerName,\n\t\ttime.Now().Format(\"2006-01-02\"),\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = stmt.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (r SchemaResolver) CreatePrinter(ctx context.Context, args CreatePrintersQueryArgs) (*PrinterResolver, error) {\n\tclient := ctx.Value(\"client\").(*printdb.Client)\n\n\tprinterID, err := client.CreatePrinter(printdb.NewPrinterRequest{Endpoint: args.Endpoint, Name: args.Name, APIKey: args.APIKey, IntegrationType: args.IntegrationType})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewPrinter(ctx, NewPrinterArgs{ID: printerID})\n}",
"func CreateFood(barcode, name, ingredients, grade string, avgGrade float64) {\n\tstmt, err := db.Prepare(\"insert into food values(?, ?, ?, ?, ?)\")\n\tif err != nil {\n\t\tlog.Fatalln(\"server.CreateFood: \", err)\n\t}\n\t// Fire query string\n\t_, err = stmt.Query(barcode, name, ingredients, grade, avgGrade)\n\tif err != nil {\n\t\tlog.Fatalln(\"server.CreateFood: \", err)\n\t}\n}",
"func (db *DB) Create() (err error) {\n\ttx, err := db.connection.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := tx.Exec(itemSchema); err != nil {\n\t\treturn handle.Transaction(tx, err)\n\t}\n\n\tif _, err := tx.Exec(enclosureSchema); err != nil {\n\t\treturn handle.Transaction(tx, err)\n\t}\n\n\tif _, err := tx.Exec(channelSchema); err != nil {\n\t\treturn handle.Transaction(tx, err)\n\t}\n\n\tif _, err := tx.Exec(publishSchema); err != nil {\n\t\treturn handle.Transaction(tx, err)\n\t}\n\n\treturn tx.Commit()\n}",
"func (m *MongoDbConn) InsertPrint(p *Print) error { return m.adminPrint(DBCmdInsert, p) }",
"func CreateBuiltinEql() *functions.BuiltinFunction {\n\treturn functions.NewBuiltinFunction(Eql, 2, true)\n}",
"func generateCreateSQL(projectPath string) error {\n\n\t// Set up a Create-Database migration, which comes first\n\tname := path.Base(projectPath)\n\td := ConfigDevelopment[\"db\"]\n\tu := ConfigDevelopment[\"db_user\"]\n\tp := ConfigDevelopment[\"db_pass\"]\n\tsql := fmt.Sprintf(\"/* Setup database for %s */\\nCREATE USER \\\"%s\\\" WITH PASSWORD '%s';\\nCREATE DATABASE \\\"%s\\\" WITH OWNER \\\"%s\\\";\", name, u, p, d, u)\n\n\t// Generate a migration to create db with today's date\n\tfile := migrationPath(projectPath, createDatabaseMigrationName)\n\terr := ioutil.WriteFile(file, []byte(sql), 0744)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If we have a Create-Tables file, copy it out to a new migration with today's date\n\tcreateTablesPath := path.Join(projectPath, \"db\", \"migrate\", createTablesMigrationName+\".sql.tmpl\")\n\tif fileExists(createTablesPath) {\n\t\tsql, err := ioutil.ReadFile(createTablesPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Now vivify the template, for now we just replace one key\n\t\tsqlString := strings.Replace(string(sql), \"[[.fragmenta_db_user]]\", u, -1)\n\n\t\tfile = migrationPath(projectPath, createTablesMigrationName)\n\t\terr = ioutil.WriteFile(file, []byte(sqlString), 0744)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Remove the old file\n\t\tos.Remove(createTablesPath)\n\n\t} else {\n\t\tfmt.Printf(\"NO TABLES %s\", createTablesPath)\n\t}\n\n\treturn nil\n}",
"func CreateGuest(e Event, c chan error, mutex *sync.Mutex) {\n\tif e.GuestDetails.Email == \"\" {\n\t\tc <- nil\n\t\treturn\n\t}\n\tmutex.Lock()\n\tresult, err := Session.Run(`MATCH(a:EVENT) WHERE a.name=$EventName\n\tCREATE (n:GUEST {name:$name, stake:$stake,\n\temail:$email, phoneNumber:$phoneNumber, gender: $gender, locationOfStay:$locationOfStay\n\t})<-[:GUEST]-(a) `, map[string]interface{}{\n\t\t\"EventName\": e.Name,\n\t\t\"name\": e.GetField(\"GuestDetails\", \"Name\"),\n\t\t\"stake\": e.GetField(\"GuestDetails\", \"Stake\"),\n\t\t\"email\": e.GetField(\"GuestDetails\", \"Email\"),\n\t\t\"phoneNumber\": e.GetField(\"GuestDetails\", \"PhoneNumber\"),\n\t\t\"gender\": e.GetField(\"GuestDetails\", \"Gender\"),\n\t\t\"locationOfStay\": e.GetField(\"GuestDetails\", \"LocationOfStay\"),\n\t})\n\tif err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\tmutex.Unlock()\n\n\tif err = result.Err(); err != nil {\n\t\tc <- err\n\t\treturn\n\t}\n\tlog.Println(\"Created GUEST node\")\n\tc <- nil\n\treturn\n}",
"func (m *MongoDbConn) adminPrint(cmd DBCommand, p *Print) error {\n\n\tdblock.Lock()\n\tdefer dblock.Unlock()\n\n\tcoll := m.Sess.DB(m.name).C(\"prints\")\n\tif coll == nil {\n\t\treturn fmt.Errorf(\"Handling a graphic print: MongoDB descriptor empty.\")\n\t}\n\n\tif p == nil {\n\t\treturn fmt.Errorf(\"Handling a graphic print: cannot create empty painting.\")\n\t}\n\n\tvar err error\n\tswitch cmd {\n\n\tcase DBCmdUpdate:\n\t\tp.Modified = NewTimestamp() // update modified timestamp first...\n\t\terr = coll.UpdateId(p.ID, p)\n\n\tcase DBCmdInsert:\n\t\tp.Created = NewTimestamp() // create new timestamp first...\n\t\terr = coll.Insert(p)\n\n\tcase DBCmdDelete:\n\t\terr = coll.RemoveId(p.ID)\n\n\tdefault:\n\t\terr = fmt.Errorf(\"Handling a graphic print: Unknown DB command.\")\n\t}\n\treturn err\n}",
"func CreateDB(db *sql.DB) error {\n\tmyDB = db\n\n\t_, err := myDB.Exec(`\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";\n CREATE TABLE IF NOT EXISTS feeds (\n\t\t\t\t\t\t\t\tid UUID PRIMARY KEY NOT NULL,\n unread_events UUID ARRAY,\n events UUID ARRAY,\n\t\t\t\t\t\t\t\tcreated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n `)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prepareStatements()\n\n\treturn err\n}",
"func ProductLineCreate(productline ProductLine) (ProductLine, error) {\n\tif err := db.Create(&productline).Error; err != nil {\n\t\treturn ProductLine{}, err\n\t}\n\n\treturn productline, nil\n}",
"func generateCreateTableSQL(params TableParameters, properties TableProperties) string {\n\tcolumnsStr := generateColumnListSQL(params.Columns)\n\n\ttableType := \"\"\n\tif properties.External {\n\t\ttableType = \"EXTERNAL\"\n\t}\n\n\tifNotExists := \"\"\n\tif params.IgnoreExists {\n\t\tifNotExists = \"IF NOT EXISTS\"\n\t}\n\n\tpartitionedBy := \"\"\n\tif len(params.Partitions) != 0 {\n\t\tpartitionedBy = fmt.Sprintf(\"PARTITIONED BY (%s)\", generateColumnListSQL(params.Partitions))\n\t}\n\n\tserdeFormatStr := \"\"\n\tif properties.SerdeFormat != \"\" && properties.SerdeRowProperties != nil {\n\t\tserdeFormatStr = fmt.Sprintf(\"ROW FORMAT SERDE '%s' WITH SERDEPROPERTIES (%s)\", properties.SerdeFormat, generateSerdeRowPropertiesSQL(properties.SerdeRowProperties))\n\t}\n\tlocation := \"\"\n\tif properties.Location != \"\" {\n\t\tlocation = fmt.Sprintf(`LOCATION \"%s\"`, properties.Location)\n\t}\n\tformat := \"\"\n\tif properties.FileFormat != \"\" {\n\t\tformat = fmt.Sprintf(\"STORED AS %s\", properties.FileFormat)\n\t}\n\treturn fmt.Sprintf(\n\t\t`CREATE %s TABLE %s\n%s (%s) %s\n%s %s %s`,\n\t\ttableType, ifNotExists,\n\t\tparams.Name, columnsStr, partitionedBy,\n\t\tserdeFormatStr, format, location,\n\t)\n}",
"func CreateNewDB() {\n\t// create users table\n\t_, err := global.DB.Exec(`Create table users(id serial primary key,\n\t\t\t\t\t\t\t username varchar(50) unique,\n\t\t\t\t\t\t\t email text unique,\n\t\t\t\t\t\t\t password_hash varchar(60) NOT NULL\n\t\t\t\t\t\t\t )`)\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating users database:\", err)\n\t\treturn\n\t}\n\n\t// create poll table\n\t_, err = global.DB.Exec(`Create table poll(id serial primary key,\n\t\t\t\t\t\t\t created_by integer references users(id) on delete cascade,\n\t\t\t\t\t\t\t time TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,\n\t\t\t\t\t\t\t title text)`)\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating poll table:\", err)\n\t\treturn\n\t}\n\n\t// create poll options table\n\t_, err = global.DB.Exec(`create table pollOption(id serial primary key,\n\t\t\t\t\t\t\t poll_id integer references Poll(id) on delete cascade,\n\t\t\t\t\t\t\t option text)`)\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating pooloption table:\", err)\n\t\treturn\n\t}\n\n\t// create vote table\n\t_, err = global.DB.Exec(`create table vote(id serial,\n\t\t\t\t\t\t\t poll_id integer references Poll(id) on delete cascade,\n\t\t\t\t\t\t\t option_id integer references pollOption(id) on delete cascade,\n\t\t\t\t\t\t\t voted_by integer references users(id) on delete cascade);`)\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating vote table:\", err)\n\t\treturn\n\t}\n}",
"func CreateEdition(host, datasetID, edition string) (*EditionUpdate, error) {\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &EditionUpdate{\n\t\tID: id.String(),\n\t\tNext: &Edition{\n\t\t\tEdition: edition,\n\t\t\tState: EditionConfirmedState,\n\t\t\tLinks: &EditionUpdateLinks{\n\t\t\t\tDataset: &LinkObject{\n\t\t\t\t\tID: datasetID,\n\t\t\t\t\tHRef: fmt.Sprintf(\"%s/datasets/%s\", host, datasetID),\n\t\t\t\t},\n\t\t\t\tSelf: &LinkObject{\n\t\t\t\t\tHRef: fmt.Sprintf(\"%s/datasets/%s/editions/%s\", host, datasetID, edition),\n\t\t\t\t},\n\t\t\t\tVersions: &LinkObject{\n\t\t\t\t\tHRef: fmt.Sprintf(\"%s/datasets/%s/editions/%s/versions\", host, datasetID, edition),\n\t\t\t\t},\n\t\t\t\tLatestVersion: &LinkObject{\n\t\t\t\t\tID: \"1\",\n\t\t\t\t\tHRef: fmt.Sprintf(\"%s/datasets/%s/editions/%s/versions/1\", host, datasetID, edition),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}",
"func (f *SQLFormatter) Fprint(w io.Writer, t *Table) {\r\n\r\n\tif f == nil || t == nil {\r\n\t\treturn\r\n\t}\r\n\r\n\tfmt.Fprintf(w, \"DROP TABLE IF EXISTS %[1]s;\\nCREATE TABLE `%[1]s` (\\n\", t.Name)\r\n\r\n\tfor i, c := range t.Columns {\r\n\t\tes := make([]string, 0, 6)\r\n\t\tes = append(es, \" `\"+c.Name+\"`\")\r\n\t\tes = append(es, c.Type)\r\n\t\tif c.NotNull {\r\n\t\t\tes = append(es, \"NOT NULL\")\r\n\t\t}\r\n\t\tif c.Option != \"\" {\r\n\t\t\tes = append(es, c.Option)\r\n\t\t}\r\n\t\tif c.Unique {\r\n\t\t\tes = append(es, \"UNIQUE\")\r\n\t\t}\r\n\t\tif c.Comment != \"\" {\r\n\t\t\tes = append(es, \"COMMENT '\"+c.Comment+\"'\")\r\n\t\t}\r\n\t\tfmt.Fprint(w, strings.Join(es, \" \"))\r\n\t\tif i < len(t.Columns)-1 {\r\n\t\t\tfmt.Fprint(w, \",\\n\")\r\n\t\t}\r\n\t}\r\n\r\n\tif len(t.PKeyColumns) > 0 {\r\n\t\tfmt.Fprintf(w, \",\\n PRIMARY KEY (%s)\", strings.Join(t.PKeyColumns, \", \"))\r\n\t}\r\n\tfor _, k := range t.UniqueKeys {\r\n\t\tfmt.Fprintf(w, \",\\n UNIQUE KEY `%s` (%s)\", k.Name, strings.Join(k.Columns, \", \"))\r\n\t}\r\n\tfor _, k := range t.IndexKeys {\r\n\t\tfmt.Fprintf(w, \",\\n INDEX `%s` (%s)\", k.Name, strings.Join(k.Columns, \", \"))\r\n\t}\r\n\r\n\tfmt.Fprintln(w, \"\\n);\")\r\n}",
"func (b *BudgetAction) Create(db *sql.DB) (err error) {\n\terr = db.QueryRow(`INSERT INTO budget_action (code, name, program_id, sector_id) \n\tVALUES($1,$2,$3,$4) RETURNING id`, b.Code, b.Name, b.ProgramID, b.SectorID).Scan(&b.ID)\n\treturn err\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
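Between SQLCreateEPrint above and ImportEPrints below, the intended call pattern is that a caller either creates a single record directly or hands a batch of records to the importer. The sketch below shows the single-record path and is illustrative only: it assumes a Config whose Connections map already holds an open database handle for the repository and a DataSource whose TableMap is populated; the repository id and metadata values are hypothetical.

// exampleCreate is a hedged usage sketch, not part of the package shown here.
func exampleCreate(config *Config, ds *DataSource) (int, error) {
repoID := "caltechauthors" // hypothetical repository id
eprint := new(EPrint)
eprint.Type = "article"
eprint.Title = "An example record" // illustrative metadata
eprint.EPrintStatus = "inbox"
// SQLCreateEPrint assigns the eprint id, dir value and timestamps itself.
return SQLCreateEPrint(config, repoID, ds, eprint)
}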
ImportEPrints takes a repository id and an eprints structure. It is a reimplementation of the EPrints Perl EPrint XML import. ImportEPrints creates EPrint records using the SQLCreateEPrint function. ImportEPrints is responsible for applying any rule sets and the eprint status; SQLCreateEPrint is responsible for datestamps and timestamps. ImportEPrints returns a list of the EPrint IDs created if successful and an error if something goes wrong. | func ImportEPrints(config *Config, repoID string, ds *DataSource, eprints *EPrints) ([]int, error) {
var importErrors error
ids := []int{}
if config.Connections == nil {
return nil, fmt.Errorf(`no databases are configured`)
}
_, ok := config.Connections[repoID]
if !ok {
return nil, fmt.Errorf(`%s database connection not configured`, repoID)
}
// Imports only create new records; reject any eprint that
// already carries a non-zero eprint id.
for _, eprint := range eprints.EPrint {
if eprint.EPrintID != 0 {
return nil, fmt.Errorf("create failed eprint id %d in %s", eprint.EPrintID, repoID)
}
if eprint.Collection == "" && ds.DefaultCollection != "" {
eprint.Collection = ds.DefaultCollection
}
if eprint.IDNumber == "" && ds.DefaultOfficialURL != "" {
eprint.IDNumber = GenerateIDNumber(eprint)
}
if eprint.OfficialURL == "" && ds.DefaultOfficialURL != "" {
eprint.OfficialURL = GenerateOfficialURL(eprint)
}
if eprint.Rights == "" && ds.DefaultRights != "" {
eprint.Rights = ds.DefaultRights
}
if eprint.Refereed == "" && eprint.Type == "article" &&
ds.DefaultRefereed != "" {
eprint.Refereed = ds.DefaultRefereed
}
if eprint.EPrintStatus == "" && ds.DefaultStatus != "" {
eprint.EPrintStatus = ds.DefaultStatus
}
if eprint.Abstract != "" && ds.StripTags {
if cleaner.HasEncodedElements([]byte(eprint.Abstract)) {
eprint.Abstract = string(cleaner.StripTags([]byte(eprint.Abstract)))
}
}
}
for _, eprint := range eprints.EPrint {
id, err := SQLCreateEPrint(config, repoID, ds, eprint)
if err != nil {
if importErrors == nil {
importErrors = err
} else {
importErrors = fmt.Errorf("%s; %s", importErrors, err)
}
}
ids = append(ids, id)
}
return ids, importErrors
} | [
"func GetAllEPrintIDs(config *Config, repoID string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint\nORDER BY date_year DESC, date_month DESC, date_day DESC`)\n}",
"func SQLReadEPrint(config *Config, repoID string, baseURL string, eprintID int) (*EPrint, error) {\n\tvar (\n\t\ttables map[string][]string\n\t\tcolumns []string\n\t)\n\tif eprintID == 0 {\n\t\treturn nil, fmt.Errorf(\"not found, %d not in %q\", eprintID, repoID)\n\t}\n\t_, ok := config.Repositories[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not found, %q not defined\", repoID)\n\t}\n\ttables = config.Repositories[repoID].TableMap\n\tcolumns, ok = tables[\"eprint\"]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not found, %q eprint table not defined\", repoID)\n\t}\n\tdb, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no database connection for %s\", repoID)\n\t}\n\n\t// NOTE: since the specific subset of columns in a repository\n\t// are known only at run time we need to setup a generic pointer\n\t// array for the scan results based on our newly allocated\n\t// EPrint struct.\n\n\teprint := new(EPrint) // Generate an empty EPrint struct\n\teprint.EPrintID = eprintID\n\n\t// NOTE: The data is littered with NULLs in EPrints. We need to\n\t// generate both a map of values into the EPrint stucture and\n\t// aggregated the SQL Column definitions to deal with the NULL\n\t// values.\n\tcolumnSQL, values := eprintToColumnsAndValues(eprint, columns, true)\n\n\t// NOTE: With the \"values\" pointer array setup the query can be built\n\t// and executed in the usually SQL fashion.\n\tstmt := fmt.Sprintf(`SELECT %s FROM eprint WHERE eprintid = ? LIMIT 1`, strings.Join(columnSQL, `, `))\n\trows, err := db.Query(stmt, eprintID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`ERROR: query error (%q, %q), %s`, repoID, stmt, err)\n\t}\n\tcnt := 0\n\tfor rows.Next() {\n\t\t// NOTE: Because values array holds the addresses into our\n\t\t// EPrint struct the \"Scan\" does the actual mapping.\n\t\t// This makes it sorta \"auto-magical\"\n\t\tif err := rows.Scan(values...); err != nil {\n\t\t\tlog.Printf(`%s.eprint eprintid = %d, %s`, repoID, eprintID, err)\n\t\t}\n\t\tcnt++\n\t}\n\trows.Close()\n\t// NOTE: need to handle zero rows returned!\n\tif cnt > 0 {\n\t\t// Normalize fields inferred from MySQL database tables.\n\t\teprint.ID = fmt.Sprintf(`%s/id/eprint/%d`, baseURL, eprint.EPrintID)\n\t\teprint.LastModified = makeTimestamp(eprint.LastModifiedYear, eprint.LastModifiedMonth, eprint.LastModifiedDay, eprint.LastModifiedHour, eprint.LastModifiedMinute, eprint.LastModifiedSecond)\n\t\t// NOTE: EPrint XML uses a datestamp for output but tracks a timestamp.\n\t\teprint.Datestamp = makeTimestamp(eprint.DatestampYear, eprint.DatestampMonth, eprint.DatestampDay, eprint.DatestampHour, eprint.DatestampMinute, eprint.DatestampSecond)\n\t\teprint.StatusChanged = makeTimestamp(eprint.StatusChangedYear, eprint.StatusChangedMonth, eprint.StatusChangedDay, eprint.StatusChangedHour, eprint.StatusChangedMinute, eprint.StatusChangedSecond)\n\t\teprint.Date = makeApproxDate(eprint.DateYear, eprint.DateMonth, eprint.DateDay)\n\n\t\t// FIXME: Add Depository info (eprint.userid -> user* tables)\n\t\t// deposited on, deposited by\n\t\tif eprint.UserID > 0 {\n\t\t\teprint.DepositedBy = userIDToName(repoID, eprint.UserID, db)\n\t\t\teprint.DepositedOn = makeTimestamp(eprint.DatestampYear, eprint.DatestampMonth, eprint.DatestampDay, eprint.DatestampHour, eprint.DatestampMinute, eprint.DatestampSecond)\n\t\t}\n\n\t\t// Used in CaltechTHESIS\n\t\teprint.ThesisSubmittedDate = makeDatestamp(eprint.ThesisSubmittedDateYear, eprint.ThesisSubmittedDateMonth, 
eprint.ThesisSubmittedDateDay)\n\t\teprint.ThesisDefenseDate = makeDatestamp(eprint.ThesisDefenseDateYear, eprint.ThesisDefenseDateMonth, eprint.ThesisDefenseDateDay)\n\t\teprint.ThesisApprovedDate = makeDatestamp(eprint.ThesisApprovedDateYear, eprint.ThesisApprovedDateMonth, eprint.ThesisApprovedDateDay)\n\t\teprint.ThesisPublicDate = makeDatestamp(eprint.ThesisPublicDateYear, eprint.ThesisPublicDateMonth, eprint.ThesisPublicDateDay)\n\t\teprint.ThesisDegreeDate = makeDatestamp(eprint.ThesisDegreeDateYear, eprint.ThesisDegreeDateMonth, eprint.ThesisDegreeDateDay)\n\t\teprint.GradOfficeApprovalDate = makeDatestamp(eprint.GradOfficeApprovalDateYear, eprint.GradOfficeApprovalDateMonth, eprint.GradOfficeApprovalDateDay)\n\n\t\t// CreatorsItemList\n\t\teprint.Creators = eprintIDToCreators(repoID, eprintID, db, tables)\n\t\t// EditorsItemList\n\t\teprint.Editors = eprintIDToEditors(repoID, eprintID, db, tables)\n\t\t// ContributorsItemList\n\t\teprint.Contributors = eprintIDToContributors(repoID, eprintID, db, tables)\n\n\t\t// CorpCreators\n\t\teprint.CorpCreators = eprintIDToCorpCreators(repoID, eprintID, db, tables)\n\t\t// CorpContributors\n\t\teprint.CorpContributors = eprintIDToCorpContributors(repoID, eprintID, db, tables)\n\n\t\t// LocalGroupItemList (SimpleItemList)\n\t\teprint.LocalGroup = eprintIDToLocalGroup(repoID, eprintID, db, tables)\n\t\t// FundersItemList (custom)\n\t\teprint.Funders = eprintIDToFunders(repoID, eprintID, db, tables)\n\t\t// Documents (*DocumentList)\n\t\teprint.Documents = eprintIDToDocumentList(repoID, baseURL, eprintID, db, tables)\n\t\t// RelatedURLs List\n\t\teprint.RelatedURL = eprintIDToRelatedURL(repoID, baseURL, eprintID, db, tables)\n\t\t// ReferenceText (item list)\n\t\teprint.ReferenceText = eprintIDToReferenceText(repoID, eprintID, db, tables)\n\t\t// Projects\n\t\teprint.Projects = eprintIDToProjects(repoID, eprintID, db, tables)\n\t\t// OtherNumberingSystem (item list)\n\t\teprint.OtherNumberingSystem = eprintIDToOtherNumberingSystem(repoID, eprintID, db, tables)\n\t\t// Subjects List\n\t\teprint.Subjects = eprintIDToSubjects(repoID, eprintID, db, tables)\n\t\t// ItemIssues\n\t\teprint.ItemIssues = eprintIDToItemIssues(repoID, eprintID, db, tables)\n\n\t\t// Exhibitors\n\t\teprint.Exhibitors = eprintIDToExhibitors(repoID, eprintID, db, tables)\n\t\t// Producers\n\t\teprint.Producers = eprintIDToProducers(repoID, eprintID, db, tables)\n\t\t// Conductors\n\t\teprint.Conductors = eprintIDToConductors(repoID, eprintID, db, tables)\n\n\t\t// Lyricists\n\t\teprint.Lyricists = eprintIDToLyricists(repoID, eprintID, db, tables)\n\n\t\t// Accompaniment\n\t\teprint.Accompaniment = eprintIDToAccompaniment(repoID, eprintID, db, tables)\n\t\t// SkillAreas\n\t\teprint.SkillAreas = eprintIDToSkillAreas(repoID, eprintID, db, tables)\n\t\t// CopyrightHolders\n\t\teprint.CopyrightHolders = eprintIDToCopyrightHolders(repoID, eprintID, db, tables)\n\t\t// Reference\n\t\teprint.Reference = eprintIDToReference(repoID, eprintID, db, tables)\n\n\t\t// ConfCreators\n\t\teprint.ConfCreators = eprintIDToConfCreators(repoID, eprintID, db, tables)\n\t\t// AltTitle\n\t\teprint.AltTitle = eprintIDToAltTitle(repoID, eprintID, db, tables)\n\t\t// PatentAssignee\n\t\teprint.PatentAssignee = eprintIDToPatentAssignee(repoID, eprintID, db, tables)\n\t\t// RelatedPatents\n\t\teprint.RelatedPatents = eprintIDToRelatedPatents(repoID, eprintID, db, tables)\n\t\t// Divisions\n\t\teprint.Divisions = eprintIDToDivisions(repoID, eprintID, db, tables)\n\t\t// 
ThesisAdvisor\n\t\teprint.ThesisAdvisor = eprintIDToThesisAdvisors(repoID, eprintID, db, tables)\n\t\t// ThesisCommittee\n\t\teprint.ThesisCommittee = eprintIDToThesisCommittee(repoID, eprintID, db, tables)\n\n\t\t// OptionMajor\n\t\teprint.OptionMajor = eprintIDToOptionMajor(repoID, eprintID, db, tables)\n\t\t// OptionMinor\n\t\teprint.OptionMinor = eprintIDToOptionMinor(repoID, eprintID, db, tables)\n\n\t\t/*************************************************************\n\t\t NOTE: These are notes about possible original implementation\n\t\t errors or elements that did not survive the upgrade to\n\t\t EPrints 3.3.16\n\n\t\t eprint.LearningLevels (not an item list in EPrints) using LearningLevelText\n\t\t GScholar, skipping not an item list, a 2010 plugin for EPRints 3.2.\n\t\t eprint.GScholar = eprintIDToGScholar(repoID, eprintID, db, tables)\n\t\t Shelves, a plugin, not replicating, not an item list\n\t\t eprint.Shelves = eprintIDToSchelves(repoID, eprintID, db, tables)\n\t\t eprint.PatentClassification is not not an item list, using eprint.PatentClassificationText\n\t\t eprint.OtherURL appears to be an extraneous\n\t\t eprint.CorpContributors apears to be an extraneous\n\t\t*************************************************************/\n\t} else {\n\t\treturn nil, fmt.Errorf(\"not found\")\n\t}\n\n\treturn eprint, nil\n}",
"func SQLCreateEPrint(config *Config, repoID string, ds *DataSource, eprint *EPrint) (int, error) {\n\tvar (\n\t\terr error\n\t)\n\tdb, ok := config.Connections[repoID]\n\tif !ok {\n\t\treturn 0, fmt.Errorf(`no database connection for %s`, repoID)\n\t}\n\t// If eprint id is zero generate a sequence of INSERT statements\n\t// for the record. Others use generate the appropriate History\n\t// records and then delete insert the new record.\n\ttableName := `eprint`\n\n\tif columns, ok := ds.TableMap[tableName]; ok {\n\t\t// Generate an empty row and capture the id created.\n\t\tstmt := `INSERT INTO eprint (eprintid) (SELECT (IFNULL((SELECT eprintid FROM eprint ORDER BY eprintid DESC LIMIT 1), 0) + 1) AS eprintid)`\n\t\t_, err := db.Exec(stmt)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t\tstmt = `SELECT eprintid FROM eprint ORDER BY eprintid DESC LIMIT 1`\n\t\trows, err := db.Query(stmt)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t\tid := 0\n\t\tfor rows.Next() {\n\t\t\tif err := rows.Scan(&id); err != nil {\n\t\t\t\treturn 0, fmt.Errorf(`could not calculate the new eprintid value, %s`, err)\n\t\t\t}\n\t\t}\n\t\trows.Close()\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL failed to get insert id, %s`, err)\n\t\t}\n\t\teprint.EPrintID = int(id)\n\t\teprint.Dir = makeDirValue(eprint.EPrintID)\n\t\t// FIXME: decide if the is automatic or if this should be\n\t\t// passed in with the data structure.\n\t\t// Generate minimal date and time stamps\n\t\tnow := time.Now()\n\t\tif eprint.Datestamp == \"\" {\n\t\t\teprint.Datestamp = now.Format(timestamp)\n\t\t\teprint.DatestampYear = now.Year()\n\t\t\teprint.DatestampMonth = int(now.Month())\n\t\t\teprint.DatestampDay = now.Day()\n\t\t\teprint.DatestampHour = now.Hour()\n\t\t\teprint.DatestampMinute = now.Minute()\n\t\t\teprint.DatestampSecond = now.Second()\n\t\t} else if dt, err := time.Parse(datestamp, eprint.Datestamp); err == nil {\n\t\t\teprint.DatestampYear = dt.Year()\n\t\t\teprint.DatestampMonth = int(dt.Month())\n\t\t\teprint.DatestampDay = dt.Day()\n\t\t} else if dt, err := time.Parse(timestamp, eprint.Datestamp); err == nil {\n\t\t\teprint.DatestampYear = dt.Year()\n\t\t\teprint.DatestampMonth = int(dt.Month())\n\t\t\teprint.DatestampDay = dt.Day()\n\t\t\teprint.DatestampHour = dt.Hour()\n\t\t\teprint.DatestampMinute = dt.Minute()\n\t\t\teprint.DatestampSecond = dt.Second()\n\t\t}\n\n\t\teprint.LastModified = now.Format(timestamp)\n\t\teprint.LastModifiedYear = now.Year()\n\t\teprint.LastModifiedMonth = int(now.Month())\n\t\teprint.LastModifiedDay = now.Day()\n\t\teprint.LastModifiedHour = now.Hour()\n\t\teprint.LastModifiedMinute = now.Minute()\n\t\teprint.LastModifiedSecond = now.Second()\n\n\t\teprint.StatusChanged = now.Format(timestamp)\n\t\teprint.StatusChangedYear = now.Year()\n\t\teprint.StatusChangedMonth = int(now.Month())\n\t\teprint.StatusChangedDay = now.Day()\n\t\teprint.StatusChangedHour = now.Hour()\n\t\teprint.StatusChangedMinute = now.Minute()\n\t\teprint.StatusChangedSecond = now.Second()\n\n\t\tif eprint.Date != \"\" {\n\t\t\teprint.DateYear, eprint.DateMonth, eprint.DateDay = approxYMD(eprint.Date)\n\t\t}\n\t\tif eprint.ThesisSubmittedDate != \"\" {\n\t\t\teprint.ThesisSubmittedDateYear, eprint.ThesisSubmittedDateMonth, eprint.ThesisSubmittedDateDay = approxYMD(eprint.ThesisSubmittedDate)\n\t\t}\n\t\tif eprint.ThesisDefenseDate != \"\" {\n\t\t\teprint.ThesisDefenseDateYear, eprint.ThesisDefenseDateMonth, 
eprint.ThesisDefenseDateDay = approxYMD(eprint.ThesisDefenseDate)\n\t\t}\n\t\tif eprint.ThesisApprovedDate != \"\" {\n\t\t\teprint.ThesisApprovedDateYear, eprint.ThesisApprovedDateMonth, eprint.ThesisApprovedDateDay = approxYMD(eprint.ThesisApprovedDate)\n\t\t}\n\t\tif eprint.ThesisPublicDate != \"\" {\n\t\t\teprint.ThesisPublicDateYear, eprint.ThesisPublicDateMonth, eprint.ThesisPublicDateDay = approxYMD(eprint.ThesisPublicDate)\n\t\t}\n\t\tif eprint.GradOfficeApprovalDate != \"\" {\n\t\t\teprint.GradOfficeApprovalDateYear, eprint.GradOfficeApprovalDateMonth, eprint.GradOfficeApprovalDateDay = approxYMD(eprint.GradOfficeApprovalDate)\n\t\t}\n\n\t\t// Step two, write the rest of the date into the main table.\n\t\tcolumnsSQL, values := eprintToColumnsAndValues(eprint, columns, false)\n\t\tstmt = fmt.Sprintf(`REPLACE INTO %s (%s) VALUES (%s)`,\n\t\t\ttableName,\n\t\t\tstrings.Join(columnsSQL, `, `),\n\t\t\tstrings.Join(qmList(len(columnsSQL)), `, `))\n\t\t_, err = db.Exec(stmt, values...)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t}\n\tif eprint.EPrintID != 0 {\n\t\tfor tableName, columns := range ds.TableMap {\n\t\t\t// Handle the remaining tables, i.e. skip eprint table.\n\t\t\tswitch {\n\t\t\tcase tableName == `eprint`:\n\t\t\t\t// Skip eprint table, we've already processed it\n\t\t\tcase tableName == `eprint_keyword`:\n\t\t\t\t// Skip eprint_keyword, our EPrints use keywords (longtext) in eprint table.\n\t\t\tcase strings.HasPrefix(tableName, `document`):\n\t\t\t\t//log.Printf(`FIXME %s columns: %s`, tableName, strings.Join(columns, `, `))\n\t\t\tcase strings.HasPrefix(tableName, `file`):\n\t\t\t\t//log.Printf(`FIXME %s columns: %s`, tableName, strings.Join(columns, `, `))\n\t\t\tdefault:\n\t\t\t\t// Insert new rows in associated table\n\t\t\t\tif err := insertItemList(db, repoID, tableName, columns, eprint); err != nil {\n\t\t\t\t\treturn eprint.EPrintID, fmt.Errorf(`failed to insert eprintid %d in table %s for %s, %s`, eprint.EPrintID, tableName, repoID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn eprint.EPrintID, nil\n\t}\n\treturn 0, err\n}",
"func GetEPrintIDsForYear(config *Config, repoID string, year int) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE date_type = \"published\" AND date_year = ? ORDER BY date_year DESC, date_month DESC, date_day DESC`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, year)\n}",
"func GetEPrintIDsForDateType(config *Config, repoID string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE ((date_type) = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, dateType, start, end)\n}",
"func GetEPrintIDsInTimestampRange(config *Config, repoID string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field, field, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, start, end)\n}",
"func GetAllEPrintIDsWithStatus(config *Config, repoID string, status string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprintid FROM eprint WHERE (eprint_status = ?) ORDER BY date_year DESC, date_month DESC, date_day DESC`, status)\n}",
"func GetEPrintIDsWithStatus(config *Config, repoID string, status string, start string, end string) ([]int, error) {\n\tstmt := `SELECT eprintid FROM eprint WHERE (eprint_status = ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(lastmod_year, \"-\",\nLPAD(IFNULL(lastmod_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(lastmod_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(lastmod_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(lastmod_second, 59), 2, \"0\")) <= ?)\nORDER BY lastmod_year DESC, lastmod_month DESC, lastmod_day DESC,\n lastmod_hour DESC, lastmod_minute DESC, lastmod_minute DESC`\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsWithStatusInTimestampRange(config *Config, repoID string, status string, field string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE\n(eprint_status = ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 1), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 0), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 0), 2, \"0\")) >= ?) AND\n(CONCAT(%s_year, \"-\",\nLPAD(IFNULL(%s_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(%s_day, 28), 2, \"0\"), \" \",\nLPAD(IFNULL(%s_hour, 23), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_minute, 59), 2, \"0\"), \":\",\nLPAD(IFNULL(%s_second, 59), 2, \"0\")) <= ?)\nORDER BY %s_year DESC, %s_month DESC, %s_day DESC, %s_hour DESC, %s_minute DESC, %s_second DESC`,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field,\n\t\tfield, field, field, field, field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, start, end)\n}",
"func GetEPrintIDsForPersonOrOrgID(config *Config, repoID string, personOrOrgType string, personOrOrgID string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprint_%s_id.eprintid AS eprintid\nFROM eprint_%s_id JOIN eprint ON (eprint_%s_id.eprintid = eprint.eprintid)\nWHERE eprint_%s_id.%s_id = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC`,\n\t\tpersonOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType, personOrOrgType)\n\treturn sqlQueryIntIDs(config, repoID, stmt, personOrOrgID)\n}",
"func GetEPrintIDsForUniqueID(config *Config, repoID string, field string, value string) ([]int, error) {\n\t// NOTE: There should only be one eprint per DOI but we have dirty data because the field is not contrained as Unique\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint WHERE LOWER(%s) = LOWER(?)`, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, value)\n}",
"func GetEPrintIDsWithStatusForDateType(config *Config, repoID string, status string, dateType string, start string, end string) ([]int, error) {\n\tstmt := fmt.Sprintf(`SELECT eprintid FROM eprint\nWHERE (eprint_status = ? ) AND (date_type = ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 1), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 1), 2, \"0\")) >= ?) AND\n(CONCAT(date_year, \"-\",\nLPAD(IFNULL(date_month, 12), 2, \"0\"), \"-\",\nLPAD(IFNULL(date_day, 28), 2, \"0\")) <= ?)\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`)\n\treturn sqlQueryIntIDs(config, repoID, stmt, status, dateType, start, end)\n}",
"func ImportE(t cbtest.T, systemPath string, extraPaths ...string) (*EphemeralSystem, error) {\n\tt.Helper()\n\n\tconfig, err := config.ObtainConfig(t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ImportWithConfigE(t, config, systemPath, extraPaths...)\n}",
"func GetEPrintIDsForPersonName(config *Config, repoID, field string, family string, given string) ([]int, error) {\n\tconditions := []string{}\n\tif strings.Contains(family, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family LIKE ?`, field))\n\t} else if family != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_family = ?`, field))\n\t}\n\tif strings.Contains(given, \"*\") || strings.Contains(given, \"%\") {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given LIKE ?`, field))\n\t} else if given != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(`%s_given = ?`, field))\n\t}\n\tstmt := fmt.Sprintf(`SELECT eprint.eprintid AS eprintid\nFROM eprint_%s JOIN eprint ON (eprint_%s.eprintid = eprint.eprintid)\nWHERE %s\nORDER BY %s_family ASC, %s_given ASC, eprint.date_year DESC, eprint.date_month DESC, eprint.date_day DESC`,\n\t\tfield, field, strings.Join(conditions, \" AND \"), field, field)\n\treturn sqlQueryIntIDs(config, repoID, stmt, family, given)\n}",
"func insertItemList(db *sql.DB, repoID string, tableName string, columns []string, eprint *EPrint) error {\n\tvar (\n\t\titemList ItemsInterface\n\t)\n\teprintid := eprint.EPrintID\n\tswitch {\n\tcase strings.HasPrefix(tableName, `eprint_creators_`):\n\t\titemList = eprint.Creators\n\tcase strings.HasPrefix(tableName, `eprint_editors_`):\n\t\titemList = eprint.Editors\n\tcase strings.HasPrefix(tableName, `eprint_contributors_`):\n\t\titemList = eprint.Contributors\n\tcase strings.HasPrefix(tableName, `eprint_corp_creators`):\n\t\titemList = eprint.CorpCreators\n\tcase strings.HasPrefix(tableName, `eprint_corp_contributors_`):\n\t\titemList = eprint.CorpContributors\n\tcase strings.HasPrefix(tableName, `eprint_thesis_advisor_`):\n\t\titemList = eprint.ThesisAdvisor\n\tcase strings.HasPrefix(tableName, `eprint_thesis_committee_`):\n\t\titemList = eprint.ThesisCommittee\n\tcase strings.HasPrefix(tableName, `eprint_item_issues_`):\n\t\titemList = eprint.ItemIssues\n\tcase strings.HasPrefix(tableName, `eprint_alt_title`):\n\t\titemList = eprint.AltTitle\n\tcase strings.HasPrefix(tableName, `eprint_conductors`):\n\t\titemList = eprint.Conductors\n\tcase strings.HasPrefix(tableName, `eprint_conf_creators_`):\n\t\titemList = eprint.ConfCreators\n\tcase strings.HasPrefix(tableName, `eprint_exhibitors_`):\n\t\titemList = eprint.Exhibitors\n\tcase strings.HasPrefix(tableName, `eprint_producers_`):\n\t\titemList = eprint.Producers\n\tcase strings.HasPrefix(tableName, `eprint_lyricists_`):\n\t\titemList = eprint.Lyricists\n\tcase strings.HasPrefix(tableName, `eprint_accompaniment`):\n\t\titemList = eprint.Accompaniment\n\tcase strings.HasPrefix(tableName, `eprint_subjec`):\n\t\titemList = eprint.Subjects\n\tcase strings.HasPrefix(tableName, `eprint_local_`):\n\t\titemList = eprint.LocalGroup\n\tcase strings.HasPrefix(tableName, `eprint_div`):\n\t\titemList = eprint.Divisions\n\tcase strings.HasPrefix(tableName, `eprint_option_maj`):\n\t\titemList = eprint.OptionMajor\n\tcase strings.HasPrefix(tableName, `eprint_option_min`):\n\t\titemList = eprint.OptionMinor\n\tcase strings.HasPrefix(tableName, `eprint_funders_`):\n\t\titemList = eprint.Funders\n\tcase strings.HasPrefix(tableName, `eprint_funders`):\n\t\t// Ignore, eprint_funders is empty in CaltechAUTHORS ...\n\t\titemList = new(FunderItemList)\n\tcase strings.HasPrefix(tableName, `eprint_other_numbering_system`):\n\t\titemList = eprint.OtherNumberingSystem\n\tcase strings.HasPrefix(tableName, `eprint_projects`):\n\t\titemList = eprint.Projects\n\tcase strings.HasPrefix(tableName, `eprint_referencetext`):\n\t\titemList = eprint.ReferenceText\n\tcase strings.HasPrefix(tableName, `eprint_related_url`):\n\t\titemList = eprint.RelatedURL\n\tcase strings.HasPrefix(tableName, `eprint_skill_areas`):\n\t\titemList = eprint.SkillAreas\n\tcase strings.HasPrefix(tableName, `eprint_patent_assignee`):\n\t\titemList = eprint.PatentAssignee\n\tcase strings.HasPrefix(tableName, `eprint_related_patents`):\n\t\titemList = eprint.RelatedPatents\n\tcase strings.HasPrefix(tableName, `eprint_referencetext`):\n\t\titemList = eprint.ReferenceText\n\tcase strings.HasPrefix(tableName, `eprint_accompaniment`):\n\t\titemList = eprint.Accompaniment\n\tcase strings.HasPrefix(tableName, `eprint_reference`):\n\t\titemList = eprint.Reference\n\tcase strings.HasPrefix(tableName, `eprint_copyright_holders`):\n\t\titemList = eprint.CopyrightHolders\n\tcase strings.HasPrefix(tableName, `eprint_related_patent`):\n\t\titemList = eprint.RelatedPatents\n\tcase strings.HasPrefix(tableName, 
`eprint_parent_assign`):\n\t\titemList = eprint.PatentAssignee\n\tcase strings.HasPrefix(tableName, `eprint_skill`):\n\t\titemList = eprint.SkillAreas\n\tcase strings.HasPrefix(tableName, `eprint_relation`):\n\t\t// NOTE: This is not the same as document_relation_*, it is a separate item list item list\n\t\t// it has the same structure with a uri and type. Our eprint implementations use a Relation\n\t\titemList = eprint.Relation\n\tcase strings.HasPrefix(tableName, `eprint_keyword`):\n\t\t// NOTE: this we appear to use the longtext of key in our eprint table. Not sure if this\n\t\t// is new or old structure. It is posssible that our longtext for keywords is a legacy structure.\n\t\t// itemList = eprint.Keyword\n\tdefault:\n\t\treturn fmt.Errorf(`do not understand table %q, columns %s`, tableName, strings.Join(columns, `, `))\n\t}\n\t// Clear the list, then insert\n\tstmt := fmt.Sprintf(`DELETE FROM %s WHERE eprintid = ?`, tableName)\n\t_, err := db.Exec(stmt, eprint.EPrintID)\n\tif err != nil {\n\t\treturn fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t}\n\tfor pos := 0; pos < itemList.Length(); pos++ {\n\t\titem := itemList.IndexOf(pos)\n\t\titem.Pos = pos\n\t\tvalues := []interface{}{}\n\t\tcolumnsSQL := []string{}\n\t\tfor _, col := range columns {\n\t\t\tswitch {\n\t\t\tcase col == `eprintid`:\n\t\t\t\tvalues = append(values, eprintid)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase col == `pos`:\n\t\t\t\tvalues = append(values, pos)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_id`):\n\t\t\t\tvalues = append(values, item.ID)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_type`):\n\t\t\t\tvalues = append(values, item.Type)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_family`):\n\t\t\t\tvalues = append(values, item.Name.Family)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_given`):\n\t\t\t\tvalues = append(values, item.Name.Given)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_honourific`):\n\t\t\t\tvalues = append(values, item.Name.Honourific)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_lineage`):\n\t\t\t\tvalues = append(values, item.Name.Lineage)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_name`):\n\t\t\t\tvalues = append(values, item.Name.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_show_email`):\n\t\t\t\t// NOTE: _show_email needs to be tested before _email\n\t\t\t\tvalues = append(values, item.ShowEMail)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_email`):\n\t\t\t\t// NOTE: _show_email needs to be tested before _email\n\t\t\t\tvalues = append(values, item.EMail)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_role`):\n\t\t\t\tvalues = append(values, item.Role)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_url`):\n\t\t\t\tvalues = append(values, item.URL)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `description`):\n\t\t\t\tvalues = append(values, item.Description)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_agency`):\n\t\t\t\tvalues = append(values, item.Agency)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_grant_number`):\n\t\t\t\tvalues = 
append(values, item.GrantNumber)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_uri`):\n\t\t\t\tvalues = append(values, item.URI)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_orcid`):\n\t\t\t\tvalues = append(values, item.ORCID)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_ror`):\n\t\t\t\tvalues = append(values, item.ROR)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_timestamp`):\n\t\t\t\tvalues = append(values, item.Timestamp)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_status`):\n\t\t\t\tvalues = append(values, item.Status)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_reported_by`):\n\t\t\t\tvalues = append(values, item.ReportedBy)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_resolved_by`):\n\t\t\t\tvalues = append(values, item.ResolvedBy)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_comment`):\n\t\t\t\tvalues = append(values, item.ResolvedBy)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_group`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_subjects`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_major`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_minor`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `_holders`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `divisions`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `subjects`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `referencetext`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `accompaniment`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `related_patents`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `patent_assignee`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `skill_areas`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tcase strings.HasSuffix(col, `alt_title`):\n\t\t\t\tvalues = append(values, item.Value)\n\t\t\t\tcolumnsSQL = append(columnsSQL, col)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"do not understand column %s.%s\\n\", tableName, col)\n\t\t\t}\n\t\t}\n\t\tstmt := fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, tableName, strings.Join(columnsSQL, `, `), strings.Join(qmList(len(columnsSQL)), `, `))\n\t\t_, err := db.Exec(stmt, values...)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(`SQL error, %q, %s`, stmt, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func DumpArticlesJob() {\n\tlogger := logger.Logger{}\n\tlogger.INFO(\"Starting dump articles job\")\n\tarticles, err := schemas.GetArticles(db.DB, bson.D{})\n\tif err != nil {\n\t\tlogger.ERROR(\"Unable to fetch article data\")\n\t}\n\tcsvFile, err := os.Create(\"article-dump.csv\")\n\tif err != nil {\n\t\tlogger.ERROR(\"Unable to fetch article data\")\n\t}\n\tdefer csvFile.Close()\n\twriter := csv.NewWriter(csvFile)\n\tdefer writer.Flush()\n\theaderRow := []string{\n\t\t\"title\",\n\t\t\"url\",\n\t\t\"description\",\n\t\t\"content\",\n\t\t\"feedDescription\",\n\t\t\"feedTitle\",\n\t\t\"feedUrl\",\n\t}\n\terr = writer.Write(headerRow)\n\tif err != nil {\n\t\tlogger.ERROR(\"Unable to fetch article data\")\n\t}\n\tfor i := range articles {\n\t\tdataRow := []string{\n\t\t\tarticles[i].Title,\n\t\t\tarticles[i].URL,\n\t\t\tarticles[i].Description,\n\t\t\tarticles[i].Content,\n\t\t\tarticles[i].FeedDescription,\n\t\t\tarticles[i].FeedTitle,\n\t\t\tarticles[i].FeedURL,\n\t\t\tstrconv.Itoa(int(articles[i].CreatedAt)),\n\t\t}\n\t\terr = writer.Write(dataRow)\n\t\tif err != nil {\n\t\t\tlogger.ERROR(\"Unable to fetch article data\")\n\t\t}\n\t}\n\tlogger.SUCCESS(\"Job Done\")\n}",
"func (p *Printer) GetAndPrintObjects(endpoint string, r StringReader, argRe *regexp.Regexp) error {\n\tif argRe != nil {\n\t\tr = NewFilteredStringReader(r, argRe)\n\t}\n\n\tfilteredArgs := make([]string, 0)\n\tfor s, err := r.ReadString(); s != \"\" || err == nil; s, err = r.ReadString() {\n\t\tfilteredArgs = append(filteredArgs, s)\n\t}\n\n\tobjectsCh := make(chan *vt.Object)\n\terrorsCh := make(chan error, len(filteredArgs))\n\n\tgo p.client.RetrieveObjects(endpoint, filteredArgs, objectsCh, errorsCh)\n\n\tif viper.GetBool(\"identifiers-only\") {\n\t\tvar objectIds []string\n\t\tfor obj := range objectsCh {\n\t\t\tobjectIds = append(objectIds, obj.ID())\n\t\t}\n\t\tif err := p.Print(objectIds); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tvar objects []*vt.Object\n\t\tfor obj := range objectsCh {\n\t\t\tobjects = append(objects, obj)\n\t\t}\n\t\tif err := p.PrintObjects(objects); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor err := range errorsCh {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n\n\treturn nil\n}",
"func Import(ctx context.Context, changes []string, message, reference, u *string, r io.Reader) (*entities.ImageImportReport, error) {\n\tvar report entities.ImageImportReport\n\tif r != nil && u != nil {\n\t\treturn nil, errors.New(\"url and r parameters cannot be used together\")\n\t}\n\tconn, err := bindings.GetClient(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparams := url.Values{}\n\tfor _, change := range changes {\n\t\tparams.Add(\"changes\", change)\n\t}\n\tif message != nil {\n\t\tparams.Set(\"message\", *message)\n\t}\n\tif reference != nil {\n\t\tparams.Set(\"reference\", *reference)\n\t}\n\tif u != nil {\n\t\tparams.Set(\"url\", *u)\n\t}\n\tresponse, err := conn.DoRequest(r, http.MethodPost, \"/images/import\", params, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &report, response.Process(&report)\n}",
"func GetEPrintIDsForORCID(config *Config, repoID string, orcid string) ([]int, error) {\n\treturn sqlQueryIntIDs(config, repoID, `SELECT eprint.eprintid AS eprintid\nFROM eprint_creators_orcid JOIN eprint ON (eprint_creators_orcid.eprintid = eprint.eprintid)\nWHERE creators_orcid = ?\nORDER BY date_year DESC, date_month DESC, date_day DESC\n`, orcid)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
EncodeNode Converts a node to labels. nodes > labels. | func EncodeNode(node *Node) map[string]string {
labels := make(map[string]string)
encodeNode(labels, node.Name, node)
return labels
} | [
"func LabelNode(nodeName, key, value string) (*corev1.Node, error) {\n\tNodeObject, err := client.Client.Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tNodeObject.Labels[key] = value\n\tNodeObject, err = client.Client.Nodes().Update(context.Background(), NodeObject, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NodeObject, nil\n}",
"func (n *nodeStorage) labelNode(key, value string) error {\n\tnodeList, err := n.client.Nodes().List(api.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, node := range nodeList.Items {\n\t\t// TODO: If the node has the label but isn't ready, try to find another?\n\t\tif !nodeReady(node) {\n\t\t\tlog.Printf(\"Ignoring not-ready node %v\", node.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v, ok := node.Labels[key]; ok {\n\t\t\tif v == value {\n\t\t\t\tlog.Printf(\"Node %v already labelled with %v\", node.Name, v)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Printf(\"Labelling node %v %v=%v\", node.Name, key, value)\n\t\tnode.Labels[key] = value\n\t\tif _, err := n.client.Nodes().Update(&node); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"Failed to find a node without key %v\", key)\n}",
"func NodeId_encodeBinaryWithEncodingMask(src []UA_NodeId, encoding u8, ctx []Ctx) status {\n\tvar ret status\n\tswitch uint32(int((src[0].identifierType))) {\n\tcase uint32(int((UA_NODEIDTYPE_NUMERIC))):\n\t\tif UA_UInt32((*src[0].identifier.numeric())) > UA_UInt32((uint32_t((uint32((65535)))))) || int(uint16((uint16((uint16_t((UA_UInt16(src[0].namespaceIndex)))))))) > 255 {\n\t\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(2)))))))))\n\t\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&encoding))[:], nil, ctx)\n\t\t\tret |= UInt16_encodeBinary((*[100000000]UA_UInt16)(unsafe.Pointer(&src[0].namespaceIndex))[:], nil, ctx)\n\t\t\tret |= UInt32_encodeBinary((*[100000000]UA_UInt32)(unsafe.Pointer(&(*src[0].identifier.numeric())))[:], nil, ctx)\n\t\t} else if UA_UInt32((*src[0].identifier.numeric())) > UA_UInt32((uint32_t((uint32((255)))))) || int(uint16((uint16((uint16_t((UA_UInt16(src[0].namespaceIndex)))))))) > 0 {\n\t\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(1)))))))))\n\t\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&encoding))[:], nil, ctx)\n\t\t\tvar nsindex u8 = u8(UA_UInt16(src[0].namespaceIndex))\n\t\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&nsindex))[:], nil, ctx)\n\t\t\tvar identifier16 u16 = u16(UA_UInt32((*src[0].identifier.numeric())))\n\t\t\tret |= UInt16_encodeBinary((*[100000000]u16)(unsafe.Pointer(&identifier16))[:], nil, ctx)\n\t\t} else {\n\t\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(0)))))))))\n\t\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&encoding))[:], nil, ctx)\n\t\t\tvar identifier8 u8 = u8(UA_UInt32((*src[0].identifier.numeric())))\n\t\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&identifier8))[:], nil, ctx)\n\t\t}\n\tcase uint32(int((UA_NODEIDTYPE_STRING))):\n\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(int((UA_NODEIDTYPE_STRING)))))))))))\n\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&encoding))[:], nil, ctx)\n\t\tret |= UInt16_encodeBinary((*[100000000]UA_UInt16)(unsafe.Pointer(&src[0].namespaceIndex))[:], nil, ctx)\n\t\tif ret != status((UA_StatusCode((uint32_t((uint32((0)))))))) {\n\t\t\treturn status(ret)\n\t\t}\n\t\tret = String_encodeBinary((*[100000000]UA_String)(unsafe.Pointer(&(*src[0].identifier.string())))[:], nil, ctx)\n\tcase uint32(int((UA_NODEIDTYPE_GUID))):\n\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(int((UA_NODEIDTYPE_GUID)))))))))))\n\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&encoding))[:], nil, ctx)\n\t\tret |= UInt16_encodeBinary((*[100000000]UA_UInt16)(unsafe.Pointer(&src[0].namespaceIndex))[:], nil, ctx)\n\t\tret |= Guid_encodeBinary((*[100000000]UA_Guid)(unsafe.Pointer(&(*src[0].identifier.guid())))[:], nil, ctx)\n\tcase uint32(int((UA_NODEIDTYPE_BYTESTRING))):\n\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(int((UA_NODEIDTYPE_BYTESTRING)))))))))))\n\t\tret |= Byte_encodeBinary((*[100000000]u8)(unsafe.Pointer(&encoding))[:], nil, ctx)\n\t\tret |= UInt16_encodeBinary((*[100000000]UA_UInt16)(unsafe.Pointer(&src[0].namespaceIndex))[:], nil, ctx)\n\t\tif ret != status((UA_StatusCode((uint32_t((uint32((0)))))))) {\n\t\t\treturn status(ret)\n\t\t}\n\t\t// ByteString\n\t\tret = String_encodeBinary((*[100000000]UA_ByteString)(unsafe.Pointer(&(*src[0].identifier.byteString())))[:], nil, ctx)\n\tdefault:\n\t\t{\n\t\t\treturn status((UA_StatusCode((uint32_t((uint32((uint32(2147614720)))))))))\n\t\t}\n\t}\n\treturn status(ret)\n}",
"func UA_NodeClass_encodeBinary(src []int, bufPos [][]UA_Byte, bufEnd [][]UA_Byte) UA_StatusCode {\n\treturn UA_encodeBinary(src, (*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[75]))[:], bufPos, bufEnd, nil, nil)\n}",
"func LabelNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, label string) func() {\n\tctx := context.Background()\n\n\terr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {\n\t\tn, err := cs.CoreV1Interface.Nodes().Get(ctx, node.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn.Labels[label] = \"\"\n\t\t_, err = cs.CoreV1Interface.Nodes().Update(ctx, n, metav1.UpdateOptions{})\n\t\treturn err\n\t})\n\n\trequire.Nil(t, err, \"unable to label %s node %s with infra: %s\", label, node.Name, err)\n\n\treturn MakeIdempotent(func() {\n\n\t\terr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {\n\t\t\tn, err := cs.CoreV1Interface.Nodes().Get(ctx, node.Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdelete(n.Labels, label)\n\t\t\t_, err = cs.CoreV1Interface.Nodes().Update(ctx, n, metav1.UpdateOptions{})\n\t\t\treturn err\n\t\t})\n\t\trequire.Nil(t, err, \"unable to remove label %q from node %q: %s\", label, node.Name, err)\n\t})\n}",
"func nodeToBytes(n *html.Node) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tw := io.Writer(&buf)\n\terr := html.Render(w, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func LabelNodes(labelNodes []string, op, labelK, labelV string) {\n\n\tfor _, nodeName := range labelNodes {\n\t\tif nodeName == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tnodeInCache, err := KubernetesClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})\n\t\tnode := nodeInCache.DeepCopy()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Failed to get the node %s due to %v\", nodeName, err)\n\t\t} else {\n\t\t\toldData, _ := json.Marshal(node)\n\t\t\tlabels := node.GetLabels()\n\t\t\tif labels == nil {\n\t\t\t\tlabels = make(map[string]string)\n\t\t\t}\n\t\t\tswitch op {\n\t\t\tcase utils.OperationAdd:\n\t\t\t\tlabels[labelK] = labelV\n\t\t\tcase utils.OperationRemove:\n\t\t\t\tdelete(labels, labelK)\n\t\t\t}\n\t\t\tnode.SetLabels(labels)\n\t\t\tnewJSON, _ := json.Marshal(node)\n\t\t\tpatchbytes, _ := strategicpatch.CreateTwoWayMergePatch(oldData, newJSON, v1.Node{})\n\t\t\tif len(patchbytes) > 2 {\n\t\t\t\t_, err = KubernetesClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchbytes)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Errorf(\"Failed labeling node %s due to %v\", node.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (c *APIClient) NodeLabels(nodeName string) (map[string]string, error) {\n\tnode, err := c.Cl.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn node.Labels, nil\n}",
"func (c *Connection) WriteNode(item gosmparse.Node) error {\n\n\t// encode id\n\tkey := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(key, uint64(item.ID))\n\n\t// prepend node identifier\n\tkey = append(prefix[\"node\"], key...)\n\n\t// encode item\n\tvalue, err := msgpack.Marshal(item)\n\tif err != nil {\n\t\tlog.Println(\"encode failed\", err)\n\t\treturn err\n\t}\n\n\t// write to db\n\terr = c.DB.Put(key, value, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func EncodeConfig(config *NodeConfig) ([]byte, error) {\n\tcfgPb := NodeConfigPb{}\n\tif config.Current != nil {\n\t\tcfgPb.Current = marshalNodeList(config.Current)\n\t}\n\tif config.Previous != nil {\n\t\tcfgPb.Previous = marshalNodeList(config.Previous)\n\t}\n\n\treturn proto.Marshal(&cfgPb)\n}",
"func (n *Node) MarshalJSON() ([]byte, error) {\n\tm := map[string]string{\"ID\": n.ID}\n\tif n.Left != nil {\n\t\tm[\"Left\"] = n.Left.ID\n\t}\n\tif n.Right != nil {\n\t\tm[\"Right\"] = n.Right.ID\n\t}\n\tif n.Value != \"\" {\n\t\tm[\"Value\"] = n.Value\n\t}\n\treturn json.Marshal(m)\n}",
"func (n *Node) ToBytes(buf *bytes.Buffer) error {\n\tenc := gob.NewEncoder(buf)\n\tif err := enc.Encode(n.Host); err != nil {\n\t\treturn err\n\t}\n\tif err := enc.Encode(n.Name); err != nil {\n\t\treturn err\n\t}\n\treturn enc.Encode(n.Options)\n}",
"func (k *Kubernetes) SetNodeLabels(_ executortypes.NodeLabelSetting, hosts []string, labels map[string]string) error {\n\t// contents in 'hosts' maybe hostname or internalIP, it should be unified into hostname\n\tnodes, err := k.k8sClient.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})\n\tif err != nil {\n\t\tlogrus.Errorf(\"failed to list nodes: %v\", err)\n\t\treturn err\n\t}\n\tupdatedHosts := make([]string, 0)\n\tfor _, host := range hosts {\n\t\tfor _, node := range nodes.Items {\n\t\t\tadd := false\n\t\t\tfor _, addr := range node.Status.Addresses {\n\n\t\t\t\tif addr.Address == host {\n\t\t\t\t\tadd = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif add {\n\t\t\t\tupdatedHosts = append(updatedHosts, node.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, host := range updatedHosts {\n\t\tprefixedLabels := map[string]*string{}\n\t\tnode, err := k.k8sClient.ClientSet.CoreV1().Nodes().Get(context.Background(), host,\n\t\t\tmetav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// 1. unset all 'dice/' labels\n\t\tfor k := range node.Labels {\n\t\t\tif !strutil.HasPrefixes(k, labelconfig.K8SLabelPrefix) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprefixedLabels[k] = nil\n\t\t}\n\n\t\t// 2. set labels in param 'labels'\n\t\tfor k := range labels {\n\t\t\tv := labels[k]\n\t\t\tprefixedKey := k\n\t\t\tif !strutil.HasPrefixes(prefixedKey, labelconfig.K8SLabelPrefix) {\n\t\t\t\tprefixedKey = strutil.Concat(labelconfig.K8SLabelPrefix, k)\n\t\t\t}\n\t\t\tprefixedLabels[prefixedKey] = &v\n\t\t}\n\n\t\t// 3. set them\n\t\tvar patch struct {\n\t\t\tMetadata struct {\n\t\t\t\tLabels map[string]*string `json:\"labels\"` // Use '*string' to cover 'null' case\n\t\t\t} `json:\"metadata\"`\n\t\t}\n\n\t\tpatch.Metadata.Labels = prefixedLabels\n\t\tpatchData, err := json.Marshal(patch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err = k.k8sClient.ClientSet.CoreV1().Nodes().Patch(context.Background(), host, types.MergePatchType,\n\t\t\tpatchData, metav1.PatchOptions{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func ConvertNode(n Node) TestNode {\n\tvar children []TestNode\n\tfor c := n.FirstChild(); c != Nil; c = c.Next() {\n\t\tchildren = append(children, ConvertNode(c))\n\t}\n\tk := n.Kind().String()\n\tif n.Kind().IsLiteral() {\n\t\tk = n.Text()\n\t}\n\treturn TestNode{\n\t\tKind: k,\n\t\tChildren: children,\n\t}\n}",
"func (m *CoreNamespaceMigration) addNodeLabel(ctx context.Context, nodeName, key, value string) error {\n\treturn wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {\n\t\tnode, err := m.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tneedUpdate := true\n\t\tif curr, ok := node.Labels[key]; ok && curr == value {\n\t\t\tneedUpdate = false\n\t\t}\n\n\t\tk := strings.Replace(key, \"/\", \"~1\", -1)\n\n\t\tlp := []StringPatch{{\n\t\t\tOp: \"add\",\n\t\t\tPath: fmt.Sprintf(\"/metadata/labels/%s\", k),\n\t\t\tValue: value,\n\t\t}}\n\n\t\tpatchBytes, err := json.Marshal(lp)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif needUpdate {\n\t\t\t_, err := m.client.CoreV1().Nodes().Patch(ctx, node.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})\n\t\t\tif err == nil {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tif !apierrs.IsConflict(err) {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\t// Retry on update conflicts.\n\t\t\treturn false, nil\n\t\t}\n\n\t\t// no update needed\n\t\treturn true, nil\n\t})\n}",
"func ExpandedNodeId_encodeBinary(src []UA_ExpandedNodeId, type_ []UA_DataType, ctx []Ctx) status {\n\tvar encoding u8\n\tif src[0].namespaceUri.data > 1 {\n\t\t// Set up the encoding mask\n\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(128)))))))))\n\t}\n\tif UA_UInt32(src[0].serverIndex) > UA_UInt32((uint32_t((uint32((0)))))) {\n\t\tencoding |= u8((UA_Byte((uint8_t((__uint8_t((uint8(64)))))))))\n\t}\n\tvar ret status = NodeId_encodeBinaryWithEncodingMask((*[100000000]UA_NodeId)(unsafe.Pointer(&src[0].nodeId))[:], u8(encoding), ctx)\n\tif ret != status((UA_StatusCode((uint32_t((uint32((0)))))))) {\n\t\t// Encode the NodeId\n\t\treturn status(ret)\n\t}\n\tif src[0].namespaceUri.data > 1 {\n\t\t// Encode the namespace. Do not return\n\t\t// * UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED afterwards.\n\t\tret = String_encodeBinary((*[100000000]UA_String)(unsafe.Pointer(&src[0].namespaceUri))[:], nil, ctx)\n\t\tif ret != status((UA_StatusCode((uint32_t((uint32((0)))))))) {\n\t\t\treturn status(ret)\n\t\t}\n\t}\n\tif UA_UInt32(src[0].serverIndex) > UA_UInt32((uint32_t((uint32((0)))))) {\n\t\t// Encode the serverIndex\n\t\tret = encodeWithExchangeBuffer((*[100000000]UA_UInt32)(unsafe.Pointer(&src[0].serverIndex))[:], encodeBinarySignature(UInt32_encodeBinary), ctx)\n\t}\n\treturn status(ret)\n}",
"func SerializeNodeInfo(nodeinfo *node.Info) []byte {\n\tvar result bytes.Buffer\n\tencoder := gob.NewEncoder(&result)\n\terr := encoder.Encode(nodeinfo)\n\tif err != nil {\n\t\tlog.Error(\"Could not serialize node info\", err)\n\t}\n\treturn result.Bytes()\n}",
"func NodeId_encodeBinary(src []UA_NodeId, type_ []UA_DataType, ctx []Ctx) status {\n\treturn NodeId_encodeBinaryWithEncodingMask(src, u8(0), ctx)\n}",
"func encodeGRPCRegisterNodeRequest(_ context.Context, request interface{}) (interface{}, error) {\n\treq := request.(endpoint.RegisterNodeRequest)\n\treturn &pb.RegisterNodeRequest{Name: req.Name, NodeIP: req.IP, NodePort: req.Port}, nil\n}",
"func UA_ReferenceNode_encodeBinary(src []UA_ReferenceNode, bufPos [][]UA_Byte, bufEnd [][]UA_Byte) UA_StatusCode {\n\treturn UA_encodeBinary(src, (*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[64]))[:], bufPos, bufEnd, nil, nil)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert no error occurred | func assertNoErr(err error, t *testing.T) {
if err != nil {
t.Errorf(err.Error())
panic(err.Error())
}
} | [
"func TestEmptyResponse(t *testing.T) {\n\tcheck := assert.New(t)\n\terr := HandleErrorStatusCode(0, []byte(``))\n\tcheck.Contains(err.Error(), \"internal error\")\n}",
"func cGoTestMayBeError(t *testing.T, message string, aPossibleError error) {\n if aPossibleError != nil {\n t.Errorf(\"%s\\nerror: %s\", message, aPossibleError.Error())\n }\n}",
"func AssertErrIsNil(t *testing.T, err error) {\n\t// Inserted model's id should not be nil:\n\trequire.Nil(t, err, \"Assertion err: %v\", err)\n}",
"func AssertNoOutput(args ...interface{}) {\n\tt := instance.Load().(*TestBar)\n\tif t.stdout.WaitForWrite(negativeTimeout) {\n\t\tassert.Fail(t, \"Expected no output\", args...)\n\t}\n}",
"func (t *TestModule) AssertNotStarted(args ...interface{}) {\n\tt.Lock()\n\tdefer t.Unlock()\n\tt.require.False(t.started, args...)\n}",
"func (o *OutputTester) AssertNoOutput(message string) {\n\tselect {\n\tcase <-o.outs:\n\t\tassert.Fail(o, \"expected no update\", message)\n\tcase <-time.After(negativeTimeout):\n\t}\n}",
"func (iter *cacheMergeIterator) assertValid() {\n\tif err := iter.Error(); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func AssertNoUnmatchedRequests(t UnmatchedRequestsTestingT) {\n\tif gock.HasUnmatchedRequest() { // nolint:nestif\n\t\tt.Log(\"gock has unmatched requests. their contents will be dumped here.\\n\")\n\n\t\tfor _, r := range gock.GetUnmatchedRequests() {\n\t\t\tprintRequestData(t, r)\n\t\t\tfmt.Println()\n\t\t}\n\n\t\tt.FailNow()\n\t}\n}",
"func (*util) Assert(val bool, msg string) {\n\tif !val {\n\t\tpanic(msg)\n\t}\n}",
"func AssertFalse(t *testing.T, msg string, ok bool) {\n\tif ok {\n\t\tt.Fatalf(msg)\n\t}\n}",
"func Test_ConstructorFailure(t *testing.T) {\n\n\tif _, err := newLogger(10, -1, \"logs\"); err == nil {\n\t\tt.Errorf(\"Constructor should have failed\")\n\t} else if err.Error() != \"amount and capacity of files must be over 0\" {\n\t\tt.Errorf(\"Unexpected error message\")\n\t}\n}",
"func TestFailures(t *testing.T) {\n\tif True() {\n\t\tt.SkipNow()\n\t}\n\n\tAssert(t, !True(), \"We have a problem Houston!\", true)\n\tEquals(t, false, true)\n\tNotEquals(t, 0, 0)\n\tOK(t, err)\n\terr = errors.New(\"\")\n\tNotOK(t, err)\n}",
"func TestNotExistingDomain(t *testing.T) {\n domain, isFree := check(\"some.example\")\n fmt.Println(domain, isFree)\n if isFree == false {\n fmt.Println(\"this domain is free\")\n t.Fail()\n }\n}",
"func checkErr(t *testing.T, err error) {\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func Ok(tb testing.TB, err error) {\n\tassert(tb, err == nil, \"unexpected error: %s\", err)\n}",
"func Test_Healthcheck__no_errors(t *testing.T) {\n\tres := HandleHealthCheck()\n\tassert.Equal(t, http.StatusOK, res.Code)\n}",
"func assertPanic(t *testing.T) {\n\tif r := recover(); r == nil {\n\t\tt.Fail()\n\t}\n}",
"func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {\n\tif !assert.Empty(t, object, msgAndArgs...) {\n\t\tt.FailNow()\n\t}\n}",
"func AssertTrue(t *testing.T, msg string, ok bool) {\n\tif !ok {\n\t\tt.Fatalf(msg)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test encoding message sizes | func TestEncodeMessageSize(t *testing.T) {
assertEq(encodeMessageSize(0), []byte{0, 0, 0, 0, 0}, t)
assertEq(encodeMessageSize(1), []byte{0, 0, 0, 0, 1}, t)
assertEq(encodeMessageSize(255), []byte{0, 0, 0, 0, 255}, t)
assertEq(encodeMessageSize(256), []byte{0, 0, 0, 1, 0}, t)
assertEq(encodeMessageSize(257), []byte{0, 0, 0, 1, 1}, t)
assertEq(encodeMessageSize(4311810305), []byte{1, 1, 1, 1, 1}, t)
assertEq(encodeMessageSize(4328719365), []byte{1, 2, 3, 4, 5}, t)
assertEq(encodeMessageSize(47362409218), []byte{11, 7, 5, 3, 2}, t)
assertEq(encodeMessageSize(1099511627775), []byte{255, 255, 255, 255, 255}, t)
} | [
"func TestDecodeMessageSize(t *testing.T) {\n\tassertEq(decodeMessageSize([]byte{0, 0, 0, 0, 0}), 0, t)\n\tassertEq(decodeMessageSize([]byte{0, 0, 0, 0, 1}), 1, t)\n\tassertEq(decodeMessageSize([]byte{0, 0, 0, 0, 255}), 255, t)\n\tassertEq(decodeMessageSize([]byte{0, 0, 0, 1, 0}), 256, t)\n\tassertEq(decodeMessageSize([]byte{0, 0, 0, 1, 1}), 257, t)\n\tassertEq(decodeMessageSize([]byte{1, 1, 1, 1, 1}), 4311810305, t)\n\tassertEq(decodeMessageSize([]byte{1, 2, 3, 4, 5}), 4328719365, t)\n\tassertEq(decodeMessageSize([]byte{11, 7, 5, 3, 2}), 47362409218, t)\n\tassertEq(decodeMessageSize([]byte{255, 255, 255, 255, 255}), 1099511627775, t)\n}",
"func encodeSizeAnnounceBlocksMessage(obj *AnnounceBlocksMessage) uint64 {\n\ti0 := uint64(0)\n\n\t// obj.MaxBkSeq\n\ti0 += 8\n\n\treturn i0\n}",
"func TestSizes(t *testing.T) {\n\t// Server setup\n\ts, err := NewServer(\"localhost\", 0, 65535)\n\tif err != nil {\n\t\tt.Errorf(\"failed to create server: %v\", err)\n\t\treturn\n\t}\n\taddr := s.l.Addr().String()\n\n\terr = registerHandlers(s)\n\tif err != nil {\n\t\tt.Errorf(\"failed to register handlers: %v\", err)\n\t\treturn\n\t}\n\tgo s.Serve()\n\n\t// connection setup\n\tn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tt.Errorf(\"unable to connect to server: %v\", err)\n\t\treturn\n\t}\n\tn.SetDeadline(time.Now().Add(3 * time.Second))\n\tb := bufio.NewReader(n)\n\n\t// 249 character key test - PASS\n\tvar d string\n\tfor i := 0; i < 249; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set \" + d + \"\\r\\ndata\\r\\n\"))\n\tr, err := b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 250 char key failed: %v\", err)\n\t}\n\tif r != \"STORED\\r\\n\" {\n\t\tt.Errorf(\"set 249 char failed: %v\", r)\n\t}\n\n\t// 250 character key test - FAIL\n\td = \"\"\n\tfor i := 0; i < 250; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set \" + d + \"\\r\\n\"))\n\tr, err = b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 250 char key failed: %v\", err)\n\t}\n\tif r != \"ERROR key can only be 250 characters long\\r\\n\" {\n\t\tt.Errorf(\"set 250 char didn't fail: %v\", r)\n\t}\n\n\t// 8k-1 character data test - PASS\n\td = \"\"\n\tfor i := 0; i < 8191; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set largeData\\r\\n\" + d + \"\\r\\n\"))\n\tr, err = b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 8k data failed: %v\", err)\n\t}\n\tif r != \"STORED\\r\\n\" {\n\t\tt.Errorf(\"set 8k-1 data failed: %v\", r)\n\t}\n\n\t// 8k character data test - FAIL\n\td = \"\"\n\tfor i := 0; i < 8192; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set largeData\\r\\n\" + d + \"\\r\\n\"))\n\tr, err = b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 8k data failed: %v\", err)\n\t}\n\tif r != \"ERROR data can only be 8192 characters long\\r\\n\" {\n\t\tt.Errorf(\"set 8k data didn't fail: %v\", r)\n\t}\n\n\ts.Close()\n}",
"func (m MyMessage) Size() uint32 {\n\thdrStr := fmt.Sprintf(\"%s\\r\\n\", m.Header())\n\treturn uint32(len(hdrStr)) + uint32(len(m.Body()))\n}",
"func TestWillMessageFit(t *testing.T) {\n\tvar TestWillMessageFit = []struct {\n\t\tname string\n\t\tmessage []rune\n\t\tencoder Encoder\n\t\tmessageLength int\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\t\"Not too long, no special GSM characters\",\n\t\t\t[]rune(\"Not too long!\"),\n\t\t\tNewGSM(),\n\t\t\t13,\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"Not too long, no special unicode characters\",\n\t\t\t[]rune(\"Not too long! 你好朋友\"),\n\t\t\tNewUTF16(),\n\t\t\t18,\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"Not too long, with special GSM character\",\n\t\t\t[]rune(\"Not too long ~\"),\n\t\t\tNewGSM(),\n\t\t\t15,\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"Not too long, with special unicode character\",\n\t\t\t[]rune(\"Not too long 🙂\"),\n\t\t\tNewUTF16(),\n\t\t\t15,\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"Too long, no special GSM characters\",\n\t\t\t[]rune(\"Oops! Too long\"),\n\t\t\tNewGSM(),\n\t\t\t13,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"Too long, no special unicode characters\",\n\t\t\t[]rune(\"Oops! Too long 你好朋友\"),\n\t\t\tNewUTF16(),\n\t\t\t18,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"Too long, with special GSM character\",\n\t\t\t[]rune(\"Oops! Too long ~\"),\n\t\t\tNewGSM(),\n\t\t\t16,\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"Too long, with special unicode character\",\n\t\t\t[]rune(\"Oops! Too long 🙂\"),\n\t\t\tNewUTF16(),\n\t\t\t16,\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor _, tt := range TestWillMessageFit {\n\t\tfit, err := willMessageFit(tt.message, tt.encoder, tt.messageLength)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"an error '%s' was encountered when checking message fit for test '%s'\", err, tt.name)\n\t\t}\n\t\tassert.Equal(t, tt.expected, fit)\n\t}\n}",
"func TestSize_UnmarshalText_MB(t *testing.T) {\n\tvar s itoml.Size\n\tif err := s.UnmarshalText([]byte(\"200m\")); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if s != 200*(1<<20) {\n\t\tt.Fatalf(\"unexpected size: %d\", s)\n\t}\n}",
"func TestTxSerializeSizeStripped(t *testing.T) {\n\t// Empty tx message.\n\tnoTx := NewMsgTx(1)\n\tnoTx.Version = 1\n\n\ttests := []struct {\n\t\tin *MsgTx // Tx to encode\n\t\tsize int // Expected serialized size\n\t}{\n\t\t// No inputs or outpus.\n\t\t{noTx, 14},\n\n\t\t// Transcaction with an input and an output.\n\t\t{multiTx, 306},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\tserializedSize := test.in.SerializeSize()\n\t\tif serializedSize != test.size {\n\t\t\tt.Errorf(\"MsgTx.SerializeSize: #%d got: %d, want: %d\", i,\n\t\t\t\tserializedSize, test.size)\n\t\t\tcontinue\n\t\t}\n\t}\n}",
"func (z *UpdateBillingHeader) Msgsize9ef447() (s int) {\n\ts = 1 + 6 + z.Nonce.Msgsize() + 6 + 1 + 5 + hsp.Uint32Size + 3 + hsp.Uint32Size + 9 + z.Receiver.Msgsize() + 6 + hsp.ArrayHeaderSize\n\tfor za0001 := range z.Users {\n\t\tif z.Users[za0001] == nil {\n\t\t\ts += hsp.NilSize\n\t\t} else {\n\t\t\ts += z.Users[za0001].Msgsize()\n\t\t}\n\t}\n\ts += 2 + hsp.Int32Size\n\treturn\n}",
"func checkAvaiableSize(message *[]byte, img image.Image) error {\n\n\t// Multiply by 3 because each pixel has 3 values (RGB)\n\t// Where 1 bit can be encoded in their respective 3 RGB values in the LSB\n\tmaxSize := img.Bounds().Max.Y * img.Bounds().Max.X * 3\n\n\t// Check if the image is big enough for the message\n\tif len(*message) > maxSize {\n\t\terrMsg := fmt.Sprintf(\"The image is not big enough to encrypt the message. Avaiable: %v, Needed: %v, diff: %v\", maxSize, len(*message), len(*message)-maxSize)\n\t\treturn errors.New(errMsg)\n\t}\n\treturn nil\n}",
"func TestSize_UnmarshalText_GB(t *testing.T) {\n\tvar s itoml.Size\n\tif err := s.UnmarshalText([]byte(\"1g\")); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if s != 1073741824 {\n\t\tt.Fatalf(\"unexpected size: %d\", s)\n\t}\n}",
"func (z Message) Msgsize() (s int) {\n\ts = 1 + msgp.Int64Size + msgp.IntSize + msgp.StringPrefixSize + len(z.ID)\n\treturn\n}",
"func encodeRequestSize(request *bytes.Buffer) {\n\tbinary.BigEndian.PutUint32(request.Bytes()[0:], uint32(request.Len()-4))\n}",
"func TestEncodedValSize(t *testing.T) {\n\trequire.Equal(t, encodedValSize, int(unsafe.Sizeof(cacheValue{})), \"encodedValSize should equal sizeof(cacheValue{})\")\n\trequire.Equal(t, 32, encodedValSize, \"encodedValSize should not change unexpectedly\")\n}",
"func checkTypedSize(typed Typed, descrip string) {\n\tswitch typed.(type) {\n\tcase *GenericHashKey:\n\t\tgot := typed.Length()\n\t\tmin, max := cryptoGenericHashBytesMin, cryptoGenericHashBytesMax\n\t\tcheckSizeInRange(got, min, max, descrip)\n\tcase *SubKey:\n\t\tgot := typed.Length()\n\t\tmin, max := CryptoKDFBytesMin, CryptoKDFBytesMax\n\t\tcheckSizeInRange(got, min, max, descrip)\n\tdefault:\n\t\texpected := typed.Size()\n\t\tgot := typed.Length()\n\t\tif got != expected {\n\t\t\tpanic(fmt.Sprintf(\"Incorrect %s buffer size, expected (%d), got (%d).\\n\", descrip, expected, got))\n\t\t}\n\t}\n}",
"func EncodingInfoListSize(list []EncodingInfo) int {\n\tsize := 0\n\tfor _, item := range list {\n\t\tsize += (20 + xgb.Pad((int(item.NameSize) * 1)))\n\t}\n\treturn size\n}",
"func (this Lvlq) EncodedSize() int {\n\tvalue := this\n\tif value == 0 {\n\t\treturn 1\n\t}\n\tsize := 0\n\tfor value > 0 {\n\t\tvalue <<= 7\n\t\tsize++\n\t}\n\treturn size\n}",
"func TestEncode(t *testing.T) {\n\ttt := []struct {\n\t\tname string\n\t\tm msg.MSG\n\t\tlength int\n\t}{\n\t\t{\n\t\t\tname: \"length\",\n\t\t\tm: msg.MSG{\n\t\t\t\tSender: \"BillKenned\",\n\t\t\t\tRecipient: \"JillKenned\",\n\t\t\t\tData: \"hello\",\n\t\t\t},\n\t\t\tlength: 27,\n\t\t},\n\t\t{\n\t\t\tname: \"shortname\",\n\t\t\tm: msg.MSG{\n\t\t\t\tSender: \"Bill\",\n\t\t\t\tRecipient: \"Cory\",\n\t\t\t\tData: \"helloworld\",\n\t\t\t},\n\t\t\tlength: 32,\n\t\t},\n\t}\n\n\tt.Log(\"Given the need to test encoding/decoding.\")\n\t{\n\t\tfor i, tst := range tt {\n\t\t\tt.Logf(\"\\tTest %d:\\t%s\", i, tst.name)\n\t\t\t{\n\t\t\t\tdata := msg.Encode(tst.m)\n\t\t\t\tif len(data) != tst.length {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct number of bytes : exp[%d] got[%d]\\n\", failed, tst.length, len(data))\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct number of bytes.\\n\", succeed)\n\n\t\t\t\tm := msg.Decode(data)\n\t\t\t\tif m.Sender != tst.m.Sender {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct Sender : exp[%v] got[%v]\\n\", failed, tst.m.Sender, m.Sender)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct Sender.\\n\", succeed)\n\n\t\t\t\tif m.Recipient != tst.m.Recipient {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct Recipient : exp[%v] got[%v]\\n\", failed, tst.m.Recipient, m.Recipient)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct Recipient.\\n\", succeed)\n\n\t\t\t\tif m.Data != tst.m.Data {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct data : exp[%s] got[%s]\\n\", failed, tst.m.Data, m.Data)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct data.\\n\", succeed)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestBlockSerializeSize(t *testing.T) {\n\t// Block with no transactions.\n\tnoTxBlock := NewMsgBlock(&blockOne.Header)\n\n\ttests := []struct {\n\t\tin *MsgBlock // Block to encode\n\t\tsize int // Expected serialized size\n\t}{\n\t\t// Block with no transactions.\n\t\t{noTxBlock, 186},\n\n\t\t// First block in the mainnet block DAG.\n\t\t{&blockOne, len(blockOneBytes)},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\tserializedSize := test.in.SerializeSize()\n\t\tif serializedSize != test.size {\n\t\t\tt.Errorf(\"MsgBlock.SerializeSize: #%d got: %d, want: \"+\n\t\t\t\t\"%d\", i, serializedSize, test.size)\n\n\t\t\tcontinue\n\t\t}\n\t}\n}",
"func Size(proto string, size int) int {\n\tif proto == \"tcp\" {\n\t\treturn dns.MaxMsgSize\n\t}\n\tif size < dns.MinMsgSize {\n\t\treturn dns.MinMsgSize\n\t}\n\treturn size\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test decoding message sizes | func TestDecodeMessageSize(t *testing.T) {
assertEq(decodeMessageSize([]byte{0, 0, 0, 0, 0}), 0, t)
assertEq(decodeMessageSize([]byte{0, 0, 0, 0, 1}), 1, t)
assertEq(decodeMessageSize([]byte{0, 0, 0, 0, 255}), 255, t)
assertEq(decodeMessageSize([]byte{0, 0, 0, 1, 0}), 256, t)
assertEq(decodeMessageSize([]byte{0, 0, 0, 1, 1}), 257, t)
assertEq(decodeMessageSize([]byte{1, 1, 1, 1, 1}), 4311810305, t)
assertEq(decodeMessageSize([]byte{1, 2, 3, 4, 5}), 4328719365, t)
assertEq(decodeMessageSize([]byte{11, 7, 5, 3, 2}), 47362409218, t)
assertEq(decodeMessageSize([]byte{255, 255, 255, 255, 255}), 1099511627775, t)
} | [
"func TestSize_UnmarshalText_MB(t *testing.T) {\n\tvar s itoml.Size\n\tif err := s.UnmarshalText([]byte(\"200m\")); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if s != 200*(1<<20) {\n\t\tt.Fatalf(\"unexpected size: %d\", s)\n\t}\n}",
"func TestEncodeMessageSize(t *testing.T) {\n\tassertEq(encodeMessageSize(0), []byte{0, 0, 0, 0, 0}, t)\n\tassertEq(encodeMessageSize(1), []byte{0, 0, 0, 0, 1}, t)\n\tassertEq(encodeMessageSize(255), []byte{0, 0, 0, 0, 255}, t)\n\tassertEq(encodeMessageSize(256), []byte{0, 0, 0, 1, 0}, t)\n\tassertEq(encodeMessageSize(257), []byte{0, 0, 0, 1, 1}, t)\n\tassertEq(encodeMessageSize(4311810305), []byte{1, 1, 1, 1, 1}, t)\n\tassertEq(encodeMessageSize(4328719365), []byte{1, 2, 3, 4, 5}, t)\n\tassertEq(encodeMessageSize(47362409218), []byte{11, 7, 5, 3, 2}, t)\n\tassertEq(encodeMessageSize(1099511627775), []byte{255, 255, 255, 255, 255}, t)\n}",
"func encodeSizeAnnounceBlocksMessage(obj *AnnounceBlocksMessage) uint64 {\n\ti0 := uint64(0)\n\n\t// obj.MaxBkSeq\n\ti0 += 8\n\n\treturn i0\n}",
"func (m MyMessage) Size() uint32 {\n\thdrStr := fmt.Sprintf(\"%s\\r\\n\", m.Header())\n\treturn uint32(len(hdrStr)) + uint32(len(m.Body()))\n}",
"func TestMsgParsing(t *testing.T) {\n\tmetricHandler := New().(*collectdMetricsHandler)\n\tt.Run(\"Invalid Messages\", func(t *testing.T) {\n\t\tfor test, blob := range testMsgsInvalid {\n\t\t\tmetricHandler.totalDecodeErrors = 0\n\t\t\tmetricHandler.Handle([]byte(blob))\n\t\t\tassert.Equal(t, uint64(1), metricHandler.totalDecodeErrors, fmt.Sprintf(\"Wrong # of errors in test iteration '%s'\", test))\n\t\t}\n\t})\n\n\tmetricHandler.totalDecodeErrors = 0\n\tt.Run(\"Valid Messages\", func(t *testing.T) {\n\t\tfor test, blob := range testMsgsValid {\n\t\t\tmetrics := metricHandler.Handle([]byte(blob))\n\t\t\tassert.Equal(t, uint64(0), metricHandler.totalDecodeErrors, test)\n\n\t\t\tassert.Equal(t, validResults[test], metrics[:len(validResults[test])], test)\n\t\t}\n\t})\n}",
"func GetMessageCount(message pdutext.Codec) int {\n\n\tmaxLength := 160\n\tmessageParts := 1\n\n\trawMsg := message.Encode()\n\tmessageLength := len(rawMsg)\n\tif messageLength > maxLength {\n\t\t// Now check the amount of messages\n\t\tmaxLength := 153 // 160-7 (UDH with 2 byte reference number) (bytes)\n\t\tif message.Type() == pdutext.UCS2Type {\n\t\t\tmaxLength = 132 // to avoid a character being split between payloads (bytes)\n\t\t}\n\t\tmessageParts = int((len(rawMsg)-1)/maxLength) + 1\n\n\t}\n\treturn messageParts\n}",
"func TestSize_UnmarshalText_GB(t *testing.T) {\n\tvar s itoml.Size\n\tif err := s.UnmarshalText([]byte(\"1g\")); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if s != 1073741824 {\n\t\tt.Fatalf(\"unexpected size: %d\", s)\n\t}\n}",
"func (z Message) Msgsize() (s int) {\n\ts = 1 + msgp.Int64Size + msgp.IntSize + msgp.StringPrefixSize + len(z.ID)\n\treturn\n}",
"func TestTooLargeDecoder(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tenc := encoding.Marshal(Block{})\n\t// change number of transactions to large number\n\tcopy(enc[len(enc)-8:], encoding.EncUint64(^uint64(0)))\n\tvar block Block\n\terr := encoding.Unmarshal(enc, &block)\n\tif err == nil {\n\t\tt.Fatal(\"expected error, got nil\")\n\t}\n\n\tvar arb [][]byte\n\tfor i := 0; i < 4; i++ {\n\t\tarb = append(arb, make([]byte, encoding.MaxSliceSize-1))\n\t}\n\tblock.Transactions = []Transaction{{\n\t\tArbitraryData: arb,\n\t}}\n\tenc = encoding.Marshal(block)\n\terr = encoding.Unmarshal(enc, &block)\n\tif err == nil {\n\t\tt.Fatal(\"expected error, got nil\")\n\t}\n}",
"func TestSizes(t *testing.T) {\n\t// Server setup\n\ts, err := NewServer(\"localhost\", 0, 65535)\n\tif err != nil {\n\t\tt.Errorf(\"failed to create server: %v\", err)\n\t\treturn\n\t}\n\taddr := s.l.Addr().String()\n\n\terr = registerHandlers(s)\n\tif err != nil {\n\t\tt.Errorf(\"failed to register handlers: %v\", err)\n\t\treturn\n\t}\n\tgo s.Serve()\n\n\t// connection setup\n\tn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tt.Errorf(\"unable to connect to server: %v\", err)\n\t\treturn\n\t}\n\tn.SetDeadline(time.Now().Add(3 * time.Second))\n\tb := bufio.NewReader(n)\n\n\t// 249 character key test - PASS\n\tvar d string\n\tfor i := 0; i < 249; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set \" + d + \"\\r\\ndata\\r\\n\"))\n\tr, err := b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 250 char key failed: %v\", err)\n\t}\n\tif r != \"STORED\\r\\n\" {\n\t\tt.Errorf(\"set 249 char failed: %v\", r)\n\t}\n\n\t// 250 character key test - FAIL\n\td = \"\"\n\tfor i := 0; i < 250; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set \" + d + \"\\r\\n\"))\n\tr, err = b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 250 char key failed: %v\", err)\n\t}\n\tif r != \"ERROR key can only be 250 characters long\\r\\n\" {\n\t\tt.Errorf(\"set 250 char didn't fail: %v\", r)\n\t}\n\n\t// 8k-1 character data test - PASS\n\td = \"\"\n\tfor i := 0; i < 8191; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set largeData\\r\\n\" + d + \"\\r\\n\"))\n\tr, err = b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 8k data failed: %v\", err)\n\t}\n\tif r != \"STORED\\r\\n\" {\n\t\tt.Errorf(\"set 8k-1 data failed: %v\", r)\n\t}\n\n\t// 8k character data test - FAIL\n\td = \"\"\n\tfor i := 0; i < 8192; i++ {\n\t\td = d + \"a\"\n\t}\n\tn.Write([]byte(\"set largeData\\r\\n\" + d + \"\\r\\n\"))\n\tr, err = b.ReadString('\\n')\n\tif err != nil {\n\t\tt.Errorf(\"set 8k data failed: %v\", err)\n\t}\n\tif r != \"ERROR data can only be 8192 characters long\\r\\n\" {\n\t\tt.Errorf(\"set 8k data didn't fail: %v\", r)\n\t}\n\n\ts.Close()\n}",
"func (z *UpdateBillingHeader) Msgsize9ef447() (s int) {\n\ts = 1 + 6 + z.Nonce.Msgsize() + 6 + 1 + 5 + hsp.Uint32Size + 3 + hsp.Uint32Size + 9 + z.Receiver.Msgsize() + 6 + hsp.ArrayHeaderSize\n\tfor za0001 := range z.Users {\n\t\tif z.Users[za0001] == nil {\n\t\t\ts += hsp.NilSize\n\t\t} else {\n\t\t\ts += z.Users[za0001].Msgsize()\n\t\t}\n\t}\n\ts += 2 + hsp.Int32Size\n\treturn\n}",
"func readMsgLen(r io.Reader) (uint32, error) {\r\n if l, err := readLen4(r); err != nil {\r\n return 0, err\r\n } else {\r\n return l, nil\r\n }\r\n}",
"func TestEncode(t *testing.T) {\n\ttt := []struct {\n\t\tname string\n\t\tm msg.MSG\n\t\tlength int\n\t}{\n\t\t{\n\t\t\tname: \"length\",\n\t\t\tm: msg.MSG{\n\t\t\t\tSender: \"BillKenned\",\n\t\t\t\tRecipient: \"JillKenned\",\n\t\t\t\tData: \"hello\",\n\t\t\t},\n\t\t\tlength: 27,\n\t\t},\n\t\t{\n\t\t\tname: \"shortname\",\n\t\t\tm: msg.MSG{\n\t\t\t\tSender: \"Bill\",\n\t\t\t\tRecipient: \"Cory\",\n\t\t\t\tData: \"helloworld\",\n\t\t\t},\n\t\t\tlength: 32,\n\t\t},\n\t}\n\n\tt.Log(\"Given the need to test encoding/decoding.\")\n\t{\n\t\tfor i, tst := range tt {\n\t\t\tt.Logf(\"\\tTest %d:\\t%s\", i, tst.name)\n\t\t\t{\n\t\t\t\tdata := msg.Encode(tst.m)\n\t\t\t\tif len(data) != tst.length {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct number of bytes : exp[%d] got[%d]\\n\", failed, tst.length, len(data))\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct number of bytes.\\n\", succeed)\n\n\t\t\t\tm := msg.Decode(data)\n\t\t\t\tif m.Sender != tst.m.Sender {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct Sender : exp[%v] got[%v]\\n\", failed, tst.m.Sender, m.Sender)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct Sender.\\n\", succeed)\n\n\t\t\t\tif m.Recipient != tst.m.Recipient {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct Recipient : exp[%v] got[%v]\\n\", failed, tst.m.Recipient, m.Recipient)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct Recipient.\\n\", succeed)\n\n\t\t\t\tif m.Data != tst.m.Data {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct data : exp[%s] got[%s]\\n\", failed, tst.m.Data, m.Data)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct data.\\n\", succeed)\n\t\t\t}\n\t\t}\n\t}\n}",
"func DecodedLen(x int) int { return x / 2 }",
"func (c *client) peekMessageSize(br *bufio.Reader) (message.MessageType, int, error) {\n\tvar (\n\t\tb []byte\n\t\terr error\n\t\tcnt int = 2\n\t)\n\n\t// Let's read enough bytes to get the message header (msg type, remaining length)\n\tfor {\n\t\t// If we have read 5 bytes and still not done, then there's a problem.\n\t\tif cnt > 5 {\n\t\t\treturn 0, 0, fmt.Errorf(\"sendrecv/peekMessageSize: 4th byte of remaining length has continuation bit set\")\n\t\t}\n\n\t\t// Peek cnt bytes from the input buffer.\n\t\tb, err = br.Peek(cnt) //读取数据后in的cursor指针不动\n\t\tif err != nil {\n\t\t\t// c.Errorf(\"Peek:%v\", err)\n\t\t\treturn 0, 0, err\n\t\t}\n\n\t\t// If not enough bytes are returned, then continue until there's enough.\n\t\tif len(b) < cnt {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If we got enough bytes, then check the last byte to see if the continuation\n\t\t// bit is set. If so, increment cnt and continue peeking\n\t\tif b[cnt-1] >= 0x80 {\n\t\t\tcnt++\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Get the remaining length of the message\n\tremlen, m := binary.Uvarint(b[1:])\n\n\t// Total message length is remlen + 1 (msg type) + m (remlen bytes)\n\ttotal := int(remlen) + 1 + m //包全长\n\n\tmtype := message.MessageType(b[0] >> 4) //固定报头的控制报文类型\n\n\treturn mtype, total, err\n}",
"func Decode(v ProtocolVersion, buf []byte) (msg IFace, total int, err error) {\n\tdefer func() {\n\t\t// TODO(troian): this case might be improved\n\t\t// Panic might be provided during message decode with malformed len\n\t\t// For example on length-prefixed payloads/topics or properties:\n\n\t\t// length prefix of payload with size 4 but actual payload size is 2\n\t\t// | payload\n\t\t// | |\n\t\t// 00040102\n\t\t// in that case buf[lpEndOffset:lpEndOffset+lpLen] will panic due to out-of-bound\n\t\t//\n\t\t// Ideally such cases should be handled by each message implementation\n\t\t// but it might be worth doing such checks (there might be many for each message) on each decode\n\t\t// as it is abnormal and server must close connection\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Println(r)\n\t\t\tmsg = nil\n\t\t\ttotal = 0\n\t\t\terr = ErrPanicDetected\n\t\t}\n\t}()\n\n\tif len(buf) < 1 {\n\t\treturn nil, 0, ErrInsufficientBufferSize\n\t}\n\n\t// [MQTT-2.2]\n\tmType := Type(buf[0] >> offsetPacketType)\n\n\t// [MQTT-2.2.1] Type.New validates message type\n\tif msg, err = New(v, mType); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tif total, err = msg.decode(buf); err != nil {\n\t\treturn nil, total, err\n\t}\n\n\treturn msg, total, nil\n}",
"func readMessageSize(block []byte, index int) int {\n\tif index+3 >= len(block) {\n\t\treturn 0\n\t}\n\treturn int(block[0+index])<<24 |\n\t\tint(block[1+index])<<16 |\n\t\tint(block[2+index])<<8 |\n\t\tint(block[3+index])\n}",
"func payloadLenForMsgLen(messageLength int) int {\n\theaderLength := len(recordsHeader)\n\tpayloadLength := messageLength - 4 - 4 - 4 - headerLength - 4\n\treturn payloadLength\n}",
"func Size(proto string, size int) int {\n\tif proto == \"tcp\" {\n\t\treturn dns.MaxMsgSize\n\t}\n\tif size < dns.MinMsgSize {\n\t\treturn dns.MinMsgSize\n\t}\n\treturn size\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test object encoding and decoding | func TestObjectEncodeDecode(t *testing.T) {
// Encode/decode an integer
intValue := 29275
encodedInt, err := encodeObject(intValue)
assertNoErr(err, t)
decodedInt, err := decodeObject[int](encodedInt)
assertNoErr(err, t)
assertEq(decodedInt, intValue, t)
// Encode/decode a string
stringValue := "Hello, encoder!"
encodedString, err := encodeObject(stringValue)
assertNoErr(err, t)
decodedString, err := decodeObject[string](encodedString)
assertNoErr(err, t)
assertEq(decodedString, stringValue, t)
// Encode/decode a slice
sliceValue := []int{2, 3, 5, 7, 11}
encodedSlice, err := encodeObject(sliceValue)
assertNoErr(err, t)
decodedSlice, err := decodeObject[[]int](encodedSlice)
assertNoErr(err, t)
assertEq(decodedSlice, sliceValue, t)
// Encode/decode a map
mapValue := map[int]int{0: 1, 1: 1, 2: 2, 3: 6, 4: 24, 5: 120}
encodedMap, err := encodeObject(mapValue)
assertNoErr(err, t)
decodedMap, err := decodeObject[map[int]int](encodedMap)
assertNoErr(err, t)
assertEq(decodedMap, mapValue, t)
// Encode/decode a struct
structValue := person{
Name: "Will",
Age: 24,
WritesInGo: true,
PrefersRust: true,
}
encodedStruct, err := encodeObject(structValue)
assertNoErr(err, t)
decodedStruct, err := decodeObject[person](encodedStruct)
assertNoErr(err, t)
assertEq(decodedStruct, structValue, t)
} | [
"func TestEncode(t *testing.T) {\n\ttt := []struct {\n\t\tname string\n\t\tm msg.MSG\n\t\tlength int\n\t}{\n\t\t{\n\t\t\tname: \"length\",\n\t\t\tm: msg.MSG{\n\t\t\t\tSender: \"BillKenned\",\n\t\t\t\tRecipient: \"JillKenned\",\n\t\t\t\tData: \"hello\",\n\t\t\t},\n\t\t\tlength: 27,\n\t\t},\n\t\t{\n\t\t\tname: \"shortname\",\n\t\t\tm: msg.MSG{\n\t\t\t\tSender: \"Bill\",\n\t\t\t\tRecipient: \"Cory\",\n\t\t\t\tData: \"helloworld\",\n\t\t\t},\n\t\t\tlength: 32,\n\t\t},\n\t}\n\n\tt.Log(\"Given the need to test encoding/decoding.\")\n\t{\n\t\tfor i, tst := range tt {\n\t\t\tt.Logf(\"\\tTest %d:\\t%s\", i, tst.name)\n\t\t\t{\n\t\t\t\tdata := msg.Encode(tst.m)\n\t\t\t\tif len(data) != tst.length {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct number of bytes : exp[%d] got[%d]\\n\", failed, tst.length, len(data))\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct number of bytes.\\n\", succeed)\n\n\t\t\t\tm := msg.Decode(data)\n\t\t\t\tif m.Sender != tst.m.Sender {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct Sender : exp[%v] got[%v]\\n\", failed, tst.m.Sender, m.Sender)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct Sender.\\n\", succeed)\n\n\t\t\t\tif m.Recipient != tst.m.Recipient {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct Recipient : exp[%v] got[%v]\\n\", failed, tst.m.Recipient, m.Recipient)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct Recipient.\\n\", succeed)\n\n\t\t\t\tif m.Data != tst.m.Data {\n\t\t\t\t\tt.Fatalf(\"\\t%s\\tShould have the correct data : exp[%s] got[%s]\\n\", failed, tst.m.Data, m.Data)\n\t\t\t\t}\n\t\t\t\tt.Logf(\"\\t%s\\tShould have the correct data.\\n\", succeed)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestJSONEncoding(t *testing.T) {\n\ttype Y struct {\n\t\tNum int\n\t}\n\ttype T struct {\n\t\tNum uint64\n\t\tStr string\n\t\tBytes []byte\n\t\tObject Y\n\t}\n\n\tx := T{\n\t\tNum: 1,\n\t\tStr: \"abc\",\n\t\tBytes: []byte{4, 5, 6},\n\t\tObject: Y{Num: 7},\n\t}\n\tbuf := encodeJSON(x)\n\n\tvar xx T\n\terr := DecodeJSON(buf, &xx)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, x, xx)\n}",
"func TestDecoder(t *testing.T) {\n\n\tvar obj = testStruct{Dummy: \"Dummy\"} // instance\n\n\tr := strings.NewReader(testData)\n\n\t// decode the string into target object\n\tp := PhpSerializer{\n\t\t// Optional, those two are the default\n\t\t// But you could implement your own if needed\n\t\tEncodeNameConverter: &SnakeToUnderscore{},\n\t\tDecodeNameConverter: &UnderscoreToSnake{},\n\t}\n\terr := p.Decode(r, &obj)\n\n\tif err != nil {\n\t\tlog.Print(obj)\n\t\tpanic(err)\n\t}\n\tlog.Print(obj)\n\n\t// check values\n\tif obj.BulkLength != \"8\" {\n\t\tlog.Fatal(\"Wrong BulkLength value: \" + obj.BulkLength)\n\t}\n\tif obj.MinimumQty != \"1\" {\n\t\tlog.Fatal(\"Wrong MinimumQty value: \" + obj.MinimumQty)\n\t}\n\tif obj.MinimumQtyRestrict != 1 {\n\t\tlog.Fatal(\"Wrong MinimumQtyRestrict value.\")\n\t}\n\tif obj.Wholesale != \"12\" {\n\t\tlog.Fatal(\"Wrong Wholesale value: \" + obj.Wholesale)\n\t}\n\tif obj.Dummy != \"Dummy\" {\n\t\tlog.Fatal(\"Wrong Dummy value: \" + obj.Dummy)\n\t}\n\tif len(obj.Products) != 4 {\n\t\tlog.Fatal(\"Wrong number of products.\")\n\t}\n\tif obj.Products[419] != \"419\" {\n\t\tlog.Fatal(\"Wrong value for product #419: \" + obj.Products[419])\n\t}\n\tif obj.Products[420] != \"420\" {\n\t\tlog.Fatal(\"Wrong value for product #420: \" + obj.Products[420])\n\t}\n\tif obj.Products[421] != \"421\" {\n\t\tlog.Fatal(\"Wrong value for product #421: \" + obj.Products[421])\n\t}\n\tif obj.Products[1255] != \"1255\" {\n\t\tlog.Fatal(\"Wrong value for product #1255: \" + obj.Products[1255])\n\t}\n}",
"func TestMyObjectDecryption(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmessage := \"BMhiHh363wsb7kNk7krTtDcey/O6ZcoKDTvVc4yDhZY=\"\n\tdecrypted, decErr := DecryptString(\"enigma\", message, false)\n\n\tassert.NoError(decErr)\n\tcustomStruct := customStruct{\n\t\tFoo: \"hi!\",\n\t\tBar: []int{1, 2, 3, 4, 5},\n\t}\n\tb, err := json.Marshal(customStruct)\n\tassert.NoError(err)\n\tassert.Equal(string(b), decrypted)\n}",
"func TestObjectDecryption(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmessage := \"IDjZE9BHSjcX67RddfCYYg==\"\n\tdecrypted, decErr := DecryptString(\"enigma\", message, false)\n\tassert.NoError(decErr)\n\n\temptyStruct := emptyStruct{}\n\n\tb, err := json.Marshal(emptyStruct)\n\tassert.NoError(err)\n\tassert.Equal(string(b), decrypted)\n}",
"func testBinaryCodecPass(t *testing.T, schema string, datum interface{}, buf []byte) {\n\ttestBinaryEncodePass(t, schema, datum, buf)\n\ttestBinaryDecodePass(t, schema, datum, buf)\n}",
"func TestMyObjectEncryption(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmessage := customStruct{\n\t\tFoo: \"hi!\",\n\t\tBar: []int{1, 2, 3, 4, 5},\n\t}\n\n\tb1, err := json.Marshal(message)\n\tassert.NoError(err)\n\n\tencrypted := EncryptString(\"enigma\", string(b1), false)\n\tassert.Equal(\"BMhiHh363wsb7kNk7krTtDcey/O6ZcoKDTvVc4yDhZY=\", encrypted)\n}",
"func testTranscodeMessage(t *testing.T, target, source Format, original interface{}) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\n\t\toriginalValue = reflect.ValueOf(original)\n\t\tencodeValue = reflect.New(originalValue.Type())\n\t\tdecodeValue = reflect.New(originalValue.Type())\n\t)\n\n\t// encodeValue is now a pointer to a copy of the original\n\tencodeValue.Elem().Set(originalValue)\n\n\tvar (\n\t\tsourceBuffer bytes.Buffer\n\t\tsourceEncoder = NewEncoder(&sourceBuffer, source)\n\t\tsourceDecoder = NewDecoder(&sourceBuffer, source)\n\n\t\ttargetBuffer bytes.Buffer\n\t\ttargetEncoder = NewEncoder(&targetBuffer, target)\n\t\ttargetDecoder = NewDecoder(&targetBuffer, target)\n\t)\n\n\t// create the input first\n\trequire.NoError(sourceEncoder.Encode(encodeValue.Interface()))\n\n\t// now we can attempt the transcode\n\tmessage, err := TranscodeMessage(targetEncoder, sourceDecoder)\n\tassert.NotNil(message)\n\tassert.NoError(err)\n\n\tassert.NoError(targetDecoder.Decode(decodeValue.Interface()))\n\tassert.Equal(encodeValue.Elem().Interface(), decodeValue.Elem().Interface())\n}",
"func TestEncodeDecode(t *testing.T) {\n\ttype test struct {\n\t\tencoding Encoding\n\t\tbytes []byte\n\t\tstr string\n\t}\n\n\tid := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}\n\ttests := []test{\n\t\t{\n\t\t\tCB58,\n\t\t\t[]byte{},\n\t\t\t\"45PJLL\",\n\t\t},\n\t\t{\n\t\t\tHex,\n\t\t\t[]byte{},\n\t\t\t\"0x7852b855\",\n\t\t},\n\t\t{\n\t\t\tCB58,\n\t\t\t[]byte{0},\n\t\t\t\"1c7hwa\",\n\t\t},\n\t\t{\n\t\t\tHex,\n\t\t\t[]byte{0},\n\t\t\t\"0x0017afa01d\",\n\t\t},\n\t\t{\n\t\t\tCB58,\n\t\t\t[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255},\n\t\t\t\"1NVSVezva3bAtJesnUj\",\n\t\t},\n\t\t{\n\t\t\tHex,\n\t\t\t[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255},\n\t\t\t\"0x00010203040506070809ff4482539c\",\n\t\t},\n\t\t{\n\t\t\tCB58,\n\t\t\tid[:],\n\t\t\t\"SkB92YpWm4Q2ijQHH34cqbKkCZWszsiQgHVjtNeFF2HdvDQU\",\n\t\t},\n\t\t{\n\t\t\tHex,\n\t\t\tid[:],\n\t\t\t\"0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20b7a612c9\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\t// Encode the bytes\n\t\tstrResult, err := Encode(test.encoding, test.bytes)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t// Make sure the string repr. is what we expected\n\t\tassert.Equal(t, test.str, strResult)\n\t\t// Decode the string\n\t\tbytesResult, err := Decode(test.encoding, strResult)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t// Make sure we got the same bytes back\n\t\tassert.DeepEqual(t, test.bytes, bytesResult)\n\t}\n}",
"func TestObjectEncryption(t *testing.T) {\n\tassert := assert.New(t)\n\n\tmessage := emptyStruct{}\n\n\tb, err := json.Marshal(message)\n\tassert.NoError(err)\n\n\tencrypted := EncryptString(\"enigma\", string(b), false)\n\n\tassert.Equal(\"IDjZE9BHSjcX67RddfCYYg==\", encrypted)\n}",
"func ByteToObj(data *[]byte, s interface{}) (bool, error) {\n\t//以下方法大数字不会被转换为科学计数法\n\td := json.NewDecoder(bytes.NewReader(*data))\n\td.UseNumber()\n\terr := d.Decode(&s)\n\t//以下方法大数字会显示科学计数法\n\t//err := json.Unmarshal(*data, s)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func serialize(obj interface{}) *bytes.Buffer {\n\tbuf := &bytes.Buffer{}\n\tif err := json.NewEncoder(buf).Encode(obj); err != nil {\n\t\t// if json encoding fails, stop the test immediately\n\t\tlog.Fatalf(\"unable to serialize obj: %v\", err)\n\t}\n\treturn buf\n}",
"func TestUnknownObjectWire(t *testing.T) {\n\texpires := time.Unix(0x495fab29, 0) // 2009-01-03 12:15:05 -0600 CST\n\tpayload := make([]byte, 128)\n\tmsgBase := wire.NewMsgUnknownObject(83928, expires, wire.ObjectType(22), 2, 1, payload)\n\n\ttests := []struct {\n\t\tin *wire.MsgUnknownObject // Message to encode\n\t\tout *wire.MsgUnknownObject // Expected decoded message\n\t\tbuf []byte // Wire encoding\n\t}{\n\t\t// Latest protocol version with multiple object vectors.\n\t\t{\n\t\t\tmsgBase,\n\t\t\tmsgBase,\n\t\t\tbaseUnknownObjectEncoded,\n\t\t},\n\t}\n\n\tt.Logf(\"Running %d tests\", len(tests))\n\tfor i, test := range tests {\n\t\t// Encode the message to wire.format.\n\t\tvar buf bytes.Buffer\n\t\terr := test.in.Encode(&buf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Encode #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(buf.Bytes(), test.buf) {\n\t\t\tt.Errorf(\"Encode #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(buf.Bytes()), spew.Sdump(test.buf))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode the message from wire.format.\n\t\tvar msg wire.MsgUnknownObject\n\t\trbuf := bytes.NewReader(test.buf)\n\t\terr = msg.Decode(rbuf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Decode #%d error %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(&msg, test.out) {\n\t\t\tt.Errorf(\"Decode #%d\\n got: %s want: %s\", i,\n\t\t\t\tspew.Sdump(msg), spew.Sdump(test.out))\n\t\t\tcontinue\n\t\t}\n\t}\n}",
"func TestSerialization(t *testing.T) {\n\tvar err error\n\tvar seqa *Sequence\n\tvar seqb *Sequence\n\n\tseqa, err = New()\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\t// Update sequence a bit\n\tfor i := 0; i < 93212; i++ {\n\t\tseqa.Next()\n\t}\n\n\t// Dump the data\n\tdata, err := seqa.Dump()\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\t// Load the new sequence\n\tseqb = &Sequence{}\n\terr = seqb.Load(data)\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t}\n\n\t// Compare the sequences\n\tif seqa.current != seqb.current || seqa.increment != seqb.increment || seqa.minvalue != seqb.minvalue || seqa.maxvalue != seqb.maxvalue {\n\t\tt.Error(\"loaded sequence does not match dumped sequence\")\n\t}\n\n\t// Ensure that seqb is initialzed\n\tif !seqb.IsStarted() {\n\t\tt.Error(\"loaded sequence isn't started?!\")\n\t}\n\n}",
"func (es *encodeState) encodeObject(obj reflect.Value, mapAsValue bool, format string, param interface{}) (object *pb.Object) {\n\tes.push(false, format, param)\n\tes.stats.Add(obj)\n\tes.stats.Start(obj)\n\n\tswitch obj.Kind() {\n\tcase reflect.Bool:\n\t\tobject = &pb.Object{Value: &pb.Object_BoolValue{obj.Bool()}}\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tobject = &pb.Object{Value: &pb.Object_Int64Value{obj.Int()}}\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tobject = &pb.Object{Value: &pb.Object_Uint64Value{obj.Uint()}}\n\tcase reflect.Float32, reflect.Float64:\n\t\tobject = &pb.Object{Value: &pb.Object_DoubleValue{obj.Float()}}\n\tcase reflect.Array:\n\t\tswitch obj.Type().Elem().Kind() {\n\t\tcase reflect.Uint8:\n\t\t\tobject = &pb.Object{Value: &pb.Object_ByteArrayValue{pbSlice(obj).Interface().([]byte)}}\n\t\tcase reflect.Uint16:\n\t\t\t// 16-bit slices are serialized as 32-bit slices.\n\t\t\t// See object.proto for details.\n\t\t\ts := pbSlice(obj).Interface().([]uint16)\n\t\t\tt := make([]uint32, len(s))\n\t\t\tfor i := range s {\n\t\t\t\tt[i] = uint32(s[i])\n\t\t\t}\n\t\t\tobject = &pb.Object{Value: &pb.Object_Uint16ArrayValue{&pb.Uint16S{Values: t}}}\n\t\tcase reflect.Uint32:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Uint32ArrayValue{&pb.Uint32S{Values: pbSlice(obj).Interface().([]uint32)}}}\n\t\tcase reflect.Uint64:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Uint64ArrayValue{&pb.Uint64S{Values: pbSlice(obj).Interface().([]uint64)}}}\n\t\tcase reflect.Uintptr:\n\t\t\tobject = &pb.Object{Value: &pb.Object_UintptrArrayValue{&pb.Uintptrs{Values: pbSlice(obj).Interface().([]uint64)}}}\n\t\tcase reflect.Int8:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Int8ArrayValue{&pb.Int8S{Values: pbSlice(obj).Interface().([]byte)}}}\n\t\tcase reflect.Int16:\n\t\t\t// 16-bit slices are serialized as 32-bit slices.\n\t\t\t// See object.proto for details.\n\t\t\ts := pbSlice(obj).Interface().([]int16)\n\t\t\tt := make([]int32, len(s))\n\t\t\tfor i := range s {\n\t\t\t\tt[i] = int32(s[i])\n\t\t\t}\n\t\t\tobject = &pb.Object{Value: &pb.Object_Int16ArrayValue{&pb.Int16S{Values: t}}}\n\t\tcase reflect.Int32:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Int32ArrayValue{&pb.Int32S{Values: pbSlice(obj).Interface().([]int32)}}}\n\t\tcase reflect.Int64:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Int64ArrayValue{&pb.Int64S{Values: pbSlice(obj).Interface().([]int64)}}}\n\t\tcase reflect.Bool:\n\t\t\tobject = &pb.Object{Value: &pb.Object_BoolArrayValue{&pb.Bools{Values: pbSlice(obj).Interface().([]bool)}}}\n\t\tcase reflect.Float32:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Float32ArrayValue{&pb.Float32S{Values: pbSlice(obj).Interface().([]float32)}}}\n\t\tcase reflect.Float64:\n\t\t\tobject = &pb.Object{Value: &pb.Object_Float64ArrayValue{&pb.Float64S{Values: pbSlice(obj).Interface().([]float64)}}}\n\t\tdefault:\n\t\t\tobject = &pb.Object{Value: &pb.Object_ArrayValue{es.encodeArray(obj)}}\n\t\t}\n\tcase reflect.Slice:\n\t\tif obj.IsNil() || obj.Cap() == 0 {\n\t\t\t// Handled specially in decode; store as nil value.\n\t\t\tobject = &pb.Object{Value: &pb.Object_RefValue{0}}\n\t\t} else {\n\t\t\t// Serialize a slice as the array plus length and capacity.\n\t\t\tobject = &pb.Object{Value: &pb.Object_SliceValue{&pb.Slice{\n\t\t\t\tCapacity: uint32(obj.Cap()),\n\t\t\t\tLength: uint32(obj.Len()),\n\t\t\t\tRefValue: es.register(arrayFromSlice(obj)),\n\t\t\t}}}\n\t\t}\n\tcase reflect.String:\n\t\tobject 
= &pb.Object{Value: &pb.Object_StringValue{[]byte(obj.String())}}\n\tcase reflect.Ptr:\n\t\tif obj.IsNil() {\n\t\t\t// Handled specially in decode; store as a nil value.\n\t\t\tobject = &pb.Object{Value: &pb.Object_RefValue{0}}\n\t\t} else {\n\t\t\tes.push(true /* dereference */, \"\", nil)\n\t\t\tobject = &pb.Object{Value: &pb.Object_RefValue{es.register(obj)}}\n\t\t\tes.pop()\n\t\t}\n\tcase reflect.Interface:\n\t\t// We don't check for IsNil here, as we want to encode type\n\t\t// information. The case of the empty interface (no type, no\n\t\t// value) is handled by encodeInteface.\n\t\tobject = &pb.Object{Value: &pb.Object_InterfaceValue{es.encodeInterface(obj)}}\n\tcase reflect.Struct:\n\t\tobject = &pb.Object{Value: &pb.Object_StructValue{es.encodeStruct(obj)}}\n\tcase reflect.Map:\n\t\tif obj.IsNil() {\n\t\t\t// Handled specially in decode; store as a nil value.\n\t\t\tobject = &pb.Object{Value: &pb.Object_RefValue{0}}\n\t\t} else if mapAsValue {\n\t\t\t// Encode the map directly.\n\t\t\tobject = &pb.Object{Value: &pb.Object_MapValue{es.encodeMap(obj)}}\n\t\t} else {\n\t\t\t// Encode a reference to the map.\n\t\t\t//\n\t\t\t// Remove the map object count here to avoid double\n\t\t\t// counting, as this object will be counted again when\n\t\t\t// it gets processed later. We do not add a reference\n\t\t\t// count as the reference is artificial.\n\t\t\tes.stats.Remove(obj)\n\t\t\tobject = &pb.Object{Value: &pb.Object_RefValue{es.register(obj)}}\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"unknown primitive %#v\", obj.Interface()))\n\t}\n\n\tes.stats.Done()\n\tes.pop()\n\treturn\n}",
"func TestByteArrayEncoding(t *testing.T) {\n\ttype T struct {\n\t\tByteArray byteArray\n\t\tMap map[byteArray]int\n\t}\n\tx := T{\n\t\tByteArray: byteArray{string([]byte{0xff})}, // try a non-utf8 key\n\t\tMap: map[byteArray]int{\n\t\t\t{string([]byte{0xff})}: 3,\n\t\t},\n\t}\n\tbuf := encodeJSON(x)\n\n\texpectedString := `{\"ByteArray\":\"/w==\",\"Map\":{\"/w==\":3}}`\n\tassert.Equal(t, expectedString, string(buf))\n\n\tvar xx T\n\terr := DecodeJSON(buf, &xx)\n\trequire.NoError(t, err)\n\tassert.Equal(t, x, xx)\n}",
"func TestCurrencyEncoding(t *testing.T) {\n\tc := NewCurrency64(351)\n\tcMar := encoding.Marshal(c)\n\tvar cUmar Currency\n\terr := encoding.Unmarshal(cMar, &cUmar)\n\tif err != nil {\n\t\tt.Error(\"Error unmarshalling a currency:\", err)\n\t}\n\tif cUmar.Cmp(c) != 0 {\n\t\tt.Error(\"Marshalling and Unmarshalling a currency did not work correctly\")\n\t}\n}",
"func TestEncoder(t *testing.T) {\n\ttests := []struct {\n\t\tf encodeFunc // function to use to encode\n\t\tin interface{} // input value\n\t\twantBytes []byte // expected bytes\n\t\twantN int // expected number of bytes written\n\t\terr error // expected error\n\t}{\n\t\t// Bool\n\t\t{fEncodeBool, false, []byte{0x00, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeBool, true, []byte{0x00, 0x00, 0x00, 0x01}, 4, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeBool, true, []byte{0x00, 0x00, 0x00}, 3, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Double\n\t\t{fEncodeDouble, float64(0), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeDouble, float64(3.141592653589793), []byte{0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d, 0x18}, 8, nil},\n\t\t{fEncodeDouble, float64(math.Inf(-1)), []byte{0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeDouble, float64(math.Inf(0)), []byte{0x7F, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeDouble, float64(3.141592653589793), []byte{0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d}, 7, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Enum\n\t\t{fEncodeEnum, int32(0), []byte{0x00, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeEnum, int32(1), []byte{0x00, 0x00, 0x00, 0x01}, 4, nil},\n\t\t// Expected Failures -- Invalid enum values\n\t\t{fEncodeEnum, int32(2), []byte{}, 0, &MarshalError{ErrorCode: ErrBadEnumValue}},\n\t\t{fEncodeEnum, int32(1234), []byte{}, 0, &MarshalError{ErrorCode: ErrBadEnumValue}},\n\n\t\t// FixedOpaque\n\t\t{fEncodeFixedOpaque, []byte{0x01}, []byte{0x01, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeFixedOpaque, []byte{0x01, 0x02}, []byte{0x01, 0x02, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeFixedOpaque, []byte{0x01, 0x02, 0x03}, []byte{0x01, 0x02, 0x03, 0x00}, 4, nil},\n\t\t{fEncodeFixedOpaque, []byte{0x01, 0x02, 0x03, 0x04}, []byte{0x01, 0x02, 0x03, 0x04}, 4, nil},\n\t\t{fEncodeFixedOpaque, []byte{0x01, 0x02, 0x03, 0x04, 0x05}, []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x00, 0x00}, 8, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeFixedOpaque, []byte{0x01}, []byte{0x01, 0x00, 0x00}, 3, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Float\n\t\t{fEncodeFloat, float32(0), []byte{0x00, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeFloat, float32(3.14), []byte{0x40, 0x48, 0xF5, 0xC3}, 4, nil},\n\t\t{fEncodeFloat, float32(1234567.0), []byte{0x49, 0x96, 0xB4, 0x38}, 4, nil},\n\t\t{fEncodeFloat, float32(math.Inf(-1)), []byte{0xFF, 0x80, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeFloat, float32(math.Inf(0)), []byte{0x7F, 0x80, 0x00, 0x00}, 4, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeFloat, float32(3.14), []byte{0x40, 0x48, 0xF5}, 3, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Hyper\n\t\t{fEncodeHyper, int64(0), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeHyper, int64(1 << 34), []byte{0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeHyper, int64(1 << 42), []byte{0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeHyper, int64(9223372036854775807), []byte{0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 8, nil},\n\t\t{fEncodeHyper, int64(-1), []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 8, nil},\n\t\t{fEncodeHyper, int64(-9223372036854775808), []byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeHyper, int64(-9223372036854775808), []byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 7, &MarshalError{ErrorCode: 
ErrIO}},\n\n\t\t// Int\n\t\t{fEncodeInt, int32(0), []byte{0x00, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeInt, int32(262144), []byte{0x00, 0x04, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeInt, int32(2147483647), []byte{0x7F, 0xFF, 0xFF, 0xFF}, 4, nil},\n\t\t{fEncodeInt, int32(-1), []byte{0xFF, 0xFF, 0xFF, 0xFF}, 4, nil},\n\t\t{fEncodeInt, int32(-2147483648), []byte{0x80, 0x00, 0x00, 0x00}, 4, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeInt, int32(2147483647), []byte{0x7F, 0xFF, 0xFF}, 3, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Opaque\n\t\t{fEncodeOpaque, []byte{0x01}, []byte{0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeOpaque, []byte{0x01, 0x02, 0x03}, []byte{0x00, 0x00, 0x00, 0x03, 0x01, 0x02, 0x03, 0x00}, 8, nil},\n\t\t// Expected Failures -- Short write in length and payload\n\t\t{fEncodeOpaque, []byte{0x01}, []byte{0x00, 0x00, 0x00}, 3, &MarshalError{ErrorCode: ErrIO}},\n\t\t{fEncodeOpaque, []byte{0x01}, []byte{0x00, 0x00, 0x00, 0x01, 0x01}, 5, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// String\n\t\t{fEncodeString, \"\", []byte{0x00, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeString, \"xdr\", []byte{0x00, 0x00, 0x00, 0x03, 0x78, 0x64, 0x72, 0x00}, 8, nil},\n\t\t{fEncodeString, \"τ=2π\", []byte{0x00, 0x00, 0x00, 0x06, 0xCF, 0x84, 0x3D, 0x32, 0xCF, 0x80, 0x00, 0x00}, 12, nil},\n\t\t// Expected Failures -- Short write in length and payload\n\t\t{fEncodeString, \"xdr\", []byte{0x00, 0x00, 0x00}, 3, &MarshalError{ErrorCode: ErrIO}},\n\t\t{fEncodeString, \"xdr\", []byte{0x00, 0x00, 0x00, 0x03, 0x78}, 5, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Uhyper\n\t\t{fEncodeUhyper, uint64(0), []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeUhyper, uint64(1 << 34), []byte{0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeUhyper, uint64(1 << 42), []byte{0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t{fEncodeUhyper, uint64(18446744073709551615), []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, 8, nil},\n\t\t{fEncodeUhyper, uint64(9223372036854775808), []byte{0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, 8, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeUhyper, uint64(9223372036854775808), []byte{0x80}, 1, &MarshalError{ErrorCode: ErrIO}},\n\n\t\t// Uint\n\t\t{fEncodeUint, uint32(0), []byte{0x00, 0x00, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeUint, uint32(262144), []byte{0x00, 0x04, 0x00, 0x00}, 4, nil},\n\t\t{fEncodeUint, uint32(4294967295), []byte{0xFF, 0xFF, 0xFF, 0xFF}, 4, nil},\n\t\t// Expected Failure -- Short write\n\t\t{fEncodeUint, uint32(262144), []byte{0x00, 0x04, 0x00}, 3, &MarshalError{ErrorCode: ErrIO}},\n\t}\n\n\tvalidEnums := make(map[int32]bool)\n\tvalidEnums[0] = true\n\tvalidEnums[1] = true\n\n\tvar err error\n\tvar n int\n\n\tfor i, test := range tests {\n\t\terr = nil\n\t\tdata := newFixedWriter(test.wantN)\n\t\tenc := NewEncoder(data)\n\t\tswitch test.f {\n\t\tcase fEncodeBool:\n\t\t\tin := test.in.(bool)\n\t\t\tn, err = enc.EncodeBool(in)\n\t\tcase fEncodeDouble:\n\t\t\tin := test.in.(float64)\n\t\t\tn, err = enc.EncodeDouble(in)\n\t\tcase fEncodeEnum:\n\t\t\tin := test.in.(int32)\n\t\t\tn, err = enc.EncodeEnum(in, validEnums)\n\t\tcase fEncodeFixedOpaque:\n\t\t\tin := test.in.([]byte)\n\t\t\tn, err = enc.EncodeFixedOpaque(in)\n\t\tcase fEncodeFloat:\n\t\t\tin := test.in.(float32)\n\t\t\tn, err = enc.EncodeFloat(in)\n\t\tcase fEncodeHyper:\n\t\t\tin := test.in.(int64)\n\t\t\tn, err = enc.EncodeHyper(in)\n\t\tcase fEncodeInt:\n\t\t\tin := test.in.(int32)\n\t\t\tn, 
err = enc.EncodeInt(in)\n\t\tcase fEncodeOpaque:\n\t\t\tin := test.in.([]byte)\n\t\t\tn, err = enc.EncodeOpaque(in)\n\t\tcase fEncodeString:\n\t\t\tin := test.in.(string)\n\t\t\tn, err = enc.EncodeString(in)\n\t\tcase fEncodeUhyper:\n\t\t\tin := test.in.(uint64)\n\t\t\tn, err = enc.EncodeUhyper(in)\n\t\tcase fEncodeUint:\n\t\t\tin := test.in.(uint32)\n\t\t\tn, err = enc.EncodeUint(in)\n\t\tdefault:\n\t\t\tt.Errorf(\"%v #%d unrecognized function\", test.f, i)\n\t\t\tcontinue\n\t\t}\n\n\t\t// First ensure the number of bytes written is the expected\n\t\t// value and the error is the expected one.\n\t\ttestName := fmt.Sprintf(\"%v #%d\", test.f, i)\n\t\ttestExpectedMRet(t, testName, n, test.wantN, err, test.err)\n\n\t\t// Finally, ensure the written bytes are what is expected.\n\t\trv := data.Bytes()\n\t\tif len(rv) != len(test.wantBytes) {\n\t\t\tt.Errorf(\"%s: unexpected len - got: %v want: %v\\n\",\n\t\t\t\ttestName, len(rv), len(test.wantBytes))\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(rv, test.wantBytes) {\n\t\t\tt.Errorf(\"%s: unexpected result - got: %v want: %v\\n\",\n\t\t\t\ttestName, rv, test.wantBytes)\n\t\t\tcontinue\n\t\t}\n\t}\n}",
"func TestGzipCoder(t *testing.T) {\n\tb := []byte(\"gzip encode and decode test.\")\n\tt.Logf(\"source bytes size: %d\\n\", len(b))\n\n\tt.Log(\"Case01: test gzip encode bytes.\")\n\tencodeB, err := myutils.GzipEncode(b)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed gzip encode: %v\\n\", err)\n\t}\n\tt.Logf(\"gzip encode bytes size: %d\\n\", len(encodeB))\n\n\tt.Log(\"Case02: test gzip decode bytes.\")\n\tdecodeB, err := myutils.GzipDecode(encodeB)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed gzip decode: %v\\n\", err)\n\t}\n\tt.Logf(\"gzip decode bytes size: %d\\n\", len(decodeB))\n\n\tif len(b) != len(decodeB) {\n\t\tt.Errorf(\"decode bytes size not matched: src=%d,decode=%d\\n\", len(b), len(decodeB))\n\t}\n\tif string(b) != string(decodeB) {\n\t\tt.Error(\"decode text not matched.\")\n\t}\n\tt.Logf(\"decode text: %s\\n\", string(decodeB))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test server creation and serving | func TestServerServe(t *testing.T) {
// Create server
server, _ := NewServer[any, any]()
assert(!server.Serving(), t, "Server should not be serving")
// Start server
err := server.Start("0.0.0.0", 0)
assertNoErr(err, t)
assert(server.Serving(), t, "Server should be serving")
time.Sleep(waitTime)
// Check server address info
host, port, err := server.GetAddr()
assertNoErr(err, t)
assert(server.sock.Addr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Server address: %s:%d\n", host, port)
// Stop server
err = server.Stop()
assertNoErr(err, t)
assert(!server.Serving(), t, "Server should not be serving")
time.Sleep(waitTime)
} | [
"func TestMultiRunningServers_v2(t *testing.T) {\n\tdomain := \"mydomain.com\"\n\thost := domain + \":443\"\n\tinitDefault()\n\tConfig.DisableBanner = true\n\tConfig.Tester.ListeningAddr = host\n\t// create the key and cert files on the fly, and delete them when this test finished\n\tcertFile, ferr := ioutil.TempFile(\"\", \"cert\")\n\n\tif ferr != nil {\n\t\tt.Fatal(ferr.Error())\n\t}\n\n\tkeyFile, ferr := ioutil.TempFile(\"\", \"key\")\n\tif ferr != nil {\n\t\tt.Fatal(ferr.Error())\n\t}\n\n\tcertFile.WriteString(testTLSCert)\n\tkeyFile.WriteString(testTLSKey)\n\n\tdefer func() {\n\t\tcertFile.Close()\n\t\ttime.Sleep(350 * time.Millisecond)\n\t\tos.Remove(certFile.Name())\n\n\t\tkeyFile.Close()\n\t\ttime.Sleep(350 * time.Millisecond)\n\t\tos.Remove(keyFile.Name())\n\t}()\n\n\tGet(\"/\", func(ctx *Context) {\n\t\tctx.Write(\"Hello from %s\", ctx.HostString())\n\t})\n\n\t// add a secondary server\n\tServers.Add(config.Server{ListeningAddr: domain + \":80\", RedirectTo: \"https://\" + host, Virtual: true})\n\t// add our primary/main server\n\tServers.Add(config.Server{ListeningAddr: host, CertFile: certFile.Name(), KeyFile: keyFile.Name(), Virtual: true})\n\n\tgo Go()\n\n\t// prepare test framework\n\tif ok := <-Available; !ok {\n\t\tt.Fatal(\"Unexpected error: server cannot start, please report this as bug!!\")\n\t}\n\n\te := Tester(t)\n\n\te.Request(\"GET\", \"http://\"+domain+\":80\").Expect().Status(StatusOK).Body().Equal(\"Hello from \" + host)\n\te.Request(\"GET\", \"https://\"+host).Expect().Status(StatusOK).Body().Equal(\"Hello from \" + host)\n\n}",
"func newTestServer(t *testing.T) *httptest.Server {\n\tcamroot, err := osutil.GoPackagePath(\"camlistore.org\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to find camlistore.org GOPATH root: %v\", err)\n\t}\n\n\tconf := serverconfig.Config{\n\t\tListen: \":3179\",\n\t\tHTTPS: false,\n\t\tAuth: \"localhost\",\n\t\tIdentity: \"26F5ABDA\",\n\t\tIdentitySecretRing: filepath.Join(camroot, filepath.FromSlash(\"pkg/jsonsign/testdata/test-secring.gpg\")),\n\t\tMemoryStorage: true,\n\t\tMemoryIndex: true,\n\t}\n\n\tconfData, err := json.MarshalIndent(conf, \"\", \" \")\n\tif err != nil {\n\t\tt.Fatalf(\"Could not json encode config: %v\", err)\n\t}\n\n\t// Setting CAMLI_CONFIG_DIR to avoid triggering failInTests in osutil.CamliConfigDir\n\tdefer os.Setenv(\"CAMLI_CONFIG_DIR\", os.Getenv(\"CAMLI_CONFIG_DIR\")) // restore after test\n\tos.Setenv(\"CAMLI_CONFIG_DIR\", \"whatever\")\n\tlowConf, err := serverinit.Load(confData)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// because these two are normally consumed in camlistored.go\n\t// TODO(mpl): serverinit.Load should consume these 2 as well. Once\n\t// consumed, we should keep all the answers as private fields, and then we\n\t// put accessors on serverinit.Config. Maybe we even stop embedding\n\t// jsonconfig.Obj in serverinit.Config too, so none of those methods are\n\t// accessible.\n\tlowConf.OptionalBool(\"https\", true)\n\tlowConf.OptionalString(\"listen\", \"\")\n\n\treindex := false\n\tvar context *http.Request // only used by App Engine. See handlerLoader in serverinit.go\n\thi := http.NewServeMux()\n\taddress := \"http://\" + conf.Listen\n\t_, err = lowConf.InstallHandlers(hi, address, reindex, context)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn httptest.NewServer(hi)\n}",
"func TestMultiRunningServers_v1(t *testing.T) {\n\thost := \"mydomain.com:443\" // you have to add it to your hosts file( for windows, as 127.0.0.1 mydomain.com)\n\tinitDefault()\n\tConfig.DisableBanner = true\n\t// create the key and cert files on the fly, and delete them when this test finished\n\tcertFile, ferr := ioutil.TempFile(\"\", \"cert\")\n\n\tif ferr != nil {\n\t\tt.Fatal(ferr.Error())\n\t}\n\n\tkeyFile, ferr := ioutil.TempFile(\"\", \"key\")\n\tif ferr != nil {\n\t\tt.Fatal(ferr.Error())\n\t}\n\n\tcertFile.WriteString(testTLSCert)\n\tkeyFile.WriteString(testTLSKey)\n\n\tdefer func() {\n\t\tcertFile.Close()\n\t\ttime.Sleep(350 * time.Millisecond)\n\t\tos.Remove(certFile.Name())\n\n\t\tkeyFile.Close()\n\t\ttime.Sleep(350 * time.Millisecond)\n\t\tos.Remove(keyFile.Name())\n\t}()\n\n\tGet(\"/\", func(ctx *Context) {\n\t\tctx.Write(\"Hello from %s\", ctx.HostString())\n\t})\n\n\t// start the secondary server\n\tAddServer(config.Server{ListeningAddr: \"mydomain.com:80\", RedirectTo: \"https://\" + host, Virtual: true})\n\t// start the main server\n\tgo ListenTo(config.Server{ListeningAddr: host, CertFile: certFile.Name(), KeyFile: keyFile.Name(), Virtual: true})\n\t// prepare test framework\n\tif ok := <-Available; !ok {\n\t\tt.Fatal(\"Unexpected error: server cannot start, please report this as bug!!\")\n\t}\n\n\te := Tester(t)\n\n\te.Request(\"GET\", \"http://mydomain.com:80\").Expect().Status(StatusOK).Body().Equal(\"Hello from \" + host)\n\te.Request(\"GET\", \"https://\"+host).Expect().Status(StatusOK).Body().Equal(\"Hello from \" + host)\n\n}",
"func NewServer(handler http.Handler) *httptest.Server",
"func TestServe(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tin string\n\t\tout string\n\t}{\n\t\t{\n\t\t\tname: \"Echo the message back to the client\",\n\t\t\tin: \"hello\",\n\t\t\tout: \"hello\",\n\t\t},\n\t\t{\n\t\t\tname: \"Limit to 140 characters\",\n\t\t\tin: fmt.Sprintf(\"%s%s\", text140chars, \"and even more\"),\n\t\t\tout: text140chars,\n\t\t},\n\t}\n\tport := 16123\n\tfor _, tt := range tests {\n\t\tport++\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ts := NewServer(fmt.Sprintf(\"localhost:%d\", port), certFile, keyFile, nil, nil, nil)\n\t\t\tready := make(chan struct{})\n\t\t\tgo func(ready <-chan struct{}, in, out string) {\n\t\t\t\t<-ready\n\n\t\t\t\tclient := buildHttpsClient(t, caFile)\n\t\t\t\ttarget := fmt.Sprintf(\"https://%s\", s.address)\n\t\t\t\tresp, err := client.Post(target, \"text/plain\", strings.NewReader(in))\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Can't connect to server = %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer resp.Body.Close()\n\n\t\t\t\tbytes, err := io.ReadAll(resp.Body)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Can't read the response = %v\", err)\n\t\t\t\t} else if got := string(bytes); got != out {\n\t\t\t\t\tt.Errorf(\"got %s, but want %s\", got, out)\n\t\t\t\t}\n\n\t\t\t\tif err := s.Shutdown(); err != nil {\n\t\t\t\t\tt.Errorf(\"Error shutting down = %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}(ready, tt.in, tt.out)\n\t\t\tif err := s.Serve(ready); err != nil && !strings.Contains(err.Error(), \"http: Server closed\") {\n\t\t\t\tt.Errorf(\"Serve() error = %v\", err)\n\t\t\t}\n\t\t})\n\t}\n}",
"func createDummyServer() (*httptest.Server, string, string) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == constants.SOFTWARE_VERSION_CHECK_API_PATH {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write([]byte(softwareVersionResp))\n\t\t}\n\n\t\tif r.URL.Path == constants.SYSTEM_RESOURCE_CHECK_API_PATH {\n\n\t\t\treqParameters := r.URL.Query()\n\t\t\tnodeType := reqParameters.Get(\"node_type\")\n\t\t\tif nodeType == constants.OPENSEARCH {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\tw.Write([]byte(osResourceCheck))\n\t\t\t}\n\t\t\tif nodeType == constants.POSTGRESQL {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\tw.Write([]byte(pgResourceCheck))\n\t\t\t}\n\t\t\tif nodeType == constants.BASTION {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\tw.Write([]byte(bastionResourceCheck))\n\t\t\t}\n\n\t\t\tif nodeType == constants.AUTOMATE {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\tw.Write([]byte(automateResourceCheck))\n\t\t\t}\n\t\t}\n\n\t\tif r.URL.Path == constants.SYSTEM_USER_CHECK_API_PATH {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tw.Write([]byte(systemUser))\n\t\t}\n\t}))\n\n\t// Extract IP and port from the server's URL\n\taddress := server.URL[strings.Index(server.URL, \"//\")+2:]\n\tcolonIndex := strings.Index(address, \":\")\n\tip := address[:colonIndex]\n\tport := address[colonIndex+1:]\n\treturn server, ip, port\n}",
"func TestServer(t *testing.T) {\n\n\ttestUsername := \"username1\"\n\ttestPassword := \"password1\"\n\ttestMetadata := \"test metadata\"\n\n\ts, err := newServer(migp.DefaultServerConfig())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thttpServer := httptest.NewServer(s.handler())\n\tdefer httpServer.Close()\n\n\tcfg := migp.DefaultConfig()\n\n\t// query test record before insertion\n\tstatus, metadata, err := migp.Query(cfg, httpServer.URL+\"/evaluate\", testUsername, testPassword)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif status != migp.NotInBreach {\n\t\tt.Fatalf(\"status: want %s, got %s\", migp.NotInBreach, status)\n\t}\n\tif string(metadata) != \"\" {\n\t\tt.Fatalf(\"metadata: want %s, got %s\", \"\", string(metadata))\n\t}\n\n\t// insert test record\n\terr = s.insert(testUsername, testPassword, testMetadata, 9, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// query test record after insertion\n\tstatus, metadata, err = migp.Query(cfg, httpServer.URL+\"/evaluate\", testUsername, testPassword)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif status != migp.InBreach {\n\t\tt.Fatalf(\"status: want %s, got %s\", migp.InBreach, status)\n\t}\n\tif string(metadata) != testMetadata {\n\t\tt.Fatalf(\"metadata: want %s, got %s\", testMetadata, string(metadata))\n\t}\n}",
"func TestRcServer(t *testing.T) {\n\topt := rc.DefaultOpt\n\topt.HTTP.ListenAddr = []string{testBindAddress}\n\topt.Template.Path = testTemplate\n\topt.Enabled = true\n\topt.Serve = true\n\topt.Files = testFs\n\tmux := http.NewServeMux()\n\trcServer, err := newServer(context.Background(), &opt, mux)\n\trequire.NoError(t, err)\n\tassert.NoError(t, rcServer.Serve())\n\tdefer func() {\n\t\tassert.NoError(t, rcServer.Shutdown())\n\t\trcServer.Wait()\n\t}()\n\ttestURL := rcServer.server.URLs()[0]\n\n\t// Do the simplest possible test to check the server is alive\n\t// Do it a few times to wait for the server to start\n\tvar resp *http.Response\n\tfor i := 0; i < 10; i++ {\n\t\tresp, err = http.Get(testURL + \"file.txt\")\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\trequire.NoError(t, err)\n\tbody, err := io.ReadAll(resp.Body)\n\t_ = resp.Body.Close()\n\n\trequire.NoError(t, err)\n\trequire.NoError(t, resp.Body.Close())\n\n\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n\tassert.Equal(t, \"this is file1.txt\\n\", string(body))\n}",
"func ServerTests() {\n\tcheckRestAPIEntryPoint()\n\tcheckNonExistentEntryPoint()\n\tcheckWrongEntryPoint()\n\tcheckWrongMethodsForEntryPoint()\n}",
"func TestServer(data string) *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {\n\t\t_, err := rw.Write([]byte(data))\n\t\tif err != nil {\n\t\t\tlog.Log().Fatal(\"can't write to buffer:\", err)\n\t\t}\n\t}))\n}",
"func TestNewServer(t *testing.T) {\n\t_ = newTestDBConnection()\n}",
"func TestNetServerRunning(t *testing.T) {\n\tconn, err := net.Dial(\"tcp\", \":8080\")\n\tif err != nil {\n\t\tt.Error(\"could not connect to server: \", err)\n\t}\n\tdefer conn.Close()\n}",
"func newTestServer() *testServer {\n\tsrv := &testServer{\n\t\tseen: make(map[string]*requestStatus),\n\t\ttotal: atomic.NewUint64(0),\n\t\taccepted: atomic.NewUint64(0),\n\t\tretried: atomic.NewUint64(0),\n\t\tfailed: atomic.NewUint64(0),\n\t\tpeak: atomic.NewInt64(0),\n\t\tactive: atomic.NewInt64(0),\n\t}\n\tsrv.server = httptest.NewServer(srv)\n\tsrv.URL = srv.server.URL\n\treturn srv\n}",
"func createServer(serverName string, serverPort string) *http.Server {\n\tmux := http.NewServeMux()\n\tfmt.Printf(\"Creating server %s, on port: %s\\n\", serverName, serverPort[1:])\n\tmux.HandleFunc(\"/\", printPort)\n\tserverObj := http.Server{\n\t\tAddr: serverPort,\n\t\tHandler: mux,\n\t}\n\treturn &serverObj\n}",
"func newTest3CServer(customPath string, customFunc func(w http.ResponseWriter, r *http.Request)) (*httptest.Server, error) {\n\trtr := mux.NewRouter()\n\n\tdealFunc := func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusCreated)\n\t}\n\n\tif customFunc != nil {\n\t\tdealFunc = customFunc\n\t}\n\n\tif customPath != StartNewDealPath {\n\t\trtr.HandleFunc(StartNewDealPath, dealFunc)\n\t}\n\n\tif customFunc != nil {\n\t\trtr.HandleFunc(customPath, customFunc)\n\t}\n\treturn httptest.NewServer(rtr), nil\n}",
"func TestServer(t *testing.T) {\n\n\tport, err := freeport.GetFreePort()\n\tif err != nil {\n\t\tt.Fatal(\"Can't get a free tcp port\")\n\t}\n\thost := \"127.0.0.1\"\n\tpath := \"\"\n\tuidGenerator := uidgenerator.New(\n\t\t&uidgenerator.Cfg{\n\t\t\tAlfa: \"1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n\t\t\tFormat: \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\",\n\t\t\tValidator: \"[0-9a-zA-Z]{32}\",\n\t\t},\n\t)\n\tuid := uidGenerator.New()\n\tfilename := path + uid + \".html\"\n\n\tdStorage := NewDummyDataStorage()\n\ttStorage := NewDummyTemplateStorage()\n\n\ttmpl, err := tStorage.Template(path)\n\tif tmpl == nil || err != nil {\n\t\tt.Fatalf(\"Null template has been returned for path %s, %v\", path, err)\n\t}\n\n\tdata, _, _, err := dStorage.Get(uid)\n\n\tbuff := bytes.NewBuffer(make([]byte, 0))\n\terr = tmpl.Execute(buff, data)\n\tif err != nil {\n\t\tt.Fatalf(\"Template execute error: %v\", err)\n\t}\n\tgenerated := buff.Bytes()\n\n\tconfiguration := &Opts{\n\t\tFtpOpts: &core.ServerOpts{Port: port, Hostname: host},\n\t\tTemplateStorage: tStorage,\n\t\tDataStorage: dStorage,\n\t\tUidGenerator: uidGenerator,\n\t\tLogFtpDebug: true,\n\t}\n\tftpd := New(configuration)\n\n\tcloseCh := make(chan error)\n\n\tgo func() {\n\t\terr = ftpd.ListenAndServe()\n\t\tif err != nil {\n\t\t\tcloseCh <- err\n\t\t}\n\t\tclose(closeCh)\n\t}()\n\n\t//wait for server became ready\n\tselect {\n\tcase err := <-closeCh:\n\t\tt.Fatalf(\"Can't start ftp server: %v\", err)\n\t//it's quite unreliable to use timeouts, but, it is simple and reasonable in this case\n\tcase <-time.After(time.Second):\n\n\t}\n\n\tdefer func() {\n\t\t_ = ftpd.Shutdown()\n\t\t//wait for server shutdown ready\n\t\tselect {\n\t\tcase <-closeCh:\n\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"Server didn't stop after waiting timeout: %v\", err)\n\t\t}\n\t}()\n\n\tdownloaded, err := downloadFile(host+\":\"+strconv.Itoa(port), filename)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !bytes.Equal(downloaded, generated) {\n\t\tt.Fatal(\"FTP server returns a wrong file content\")\n\t\treturn\n\t}\n}",
"func TestStaticWebServer2(t *testing.T) {\n\tcontent := []byte(`{\"a\":\"json-document\"}`)\n\tcontentType := \"application/json\"\n\tserver := NewStaticWebServer(WebServerOptions{Addr: testAddr}, content, http.StatusOK, map[string]string{\"Content-Type\": contentType})\n\tdefer stopper(server, t)\n\tif err := server.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tresponse, err := http.Get(fmt.Sprintf(\"http://%s/\", server.Addr()))\n\t// Invoke again to workaround weird issue where during the first request the\n\t// handler previously defined in Test_StaticWebServer1 is invoked before\n\t// ours.\n\tresponse, err = http.Get(fmt.Sprintf(\"http://%s/\", server.Addr()))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Verify response status code.\n\tif response.StatusCode != http.StatusOK {\n\t\tt.Errorf(\"Expected status code=%v but instead got status code=%v\", http.StatusOK, response.StatusCode)\n\t}\n\t// Verify response content-type header.\n\tfoundContentType := response.Header.Get(\"Content-Type\")\n\tif foundContentType != contentType {\n\t\tt.Errorf(\"Expected Content-Type header=%s but instead header=%s\", contentType, foundContentType)\n\t}\n\t// Verify body content.\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(body) != string(content) {\n\t\tt.Errorf(`Expected body to be \"%s\" but instead found \"%s\"`, string(content), string(body))\n\t}\n}",
"func ControlServer(BasePort int) {\n\tcontrolPort := strconv.Itoa(BasePort + 0)\n\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintf(w, \"OK\\n\")\n\t})\n\n\thttp.HandleFunc(\"/stop\", func(w http.ResponseWriter, r *http.Request) {\n\t\texistenceIsPain()\n\t})\n\n\thttp.HandleFunc(\"/uninstall\", func(w http.ResponseWriter, r *http.Request) {\n\t\tex, err := os.Executable()\n\t\tif err != nil {\n\t\t\texistenceIsPain()\n\t\t}\n\t\tos.Remove(ex)\n\t\texistenceIsPain()\n\t})\n\n\thttp.HandleFunc(\"/hardware\", func(w http.ResponseWriter, r *http.Request) {\n\t\t// No Error Handling I Know But It Will Be Fine XD\n\t\trelease := osinfo.GetVersion()\n\t\tmemory, _ := ghw.Memory()\n\t\tblock, _ := ghw.Block()\n\t\tgpu, _ := ghw.GPU()\n\t\tcpu, _ := ghw.CPU()\n\n\t\t// Set Values for JSON Return\n\t\tinfo := hardware{}\n\t\tfor _, proc := range cpu.Processors {\n\t\t\tinfo.CPU = proc.Model\n\t\t}\n\t\tfor _, vc := range gpu.GraphicsCards {\n\t\t\tinfo.GPU = vc.DeviceInfo.Product.Name\n\t\t}\n\n\t\tinfo.Runtime = release.Runtime\n\t\tinfo.OSArch = release.Arch\n\t\tinfo.OSName = release.Name\n\t\tinfo.OSVersion = release.Version\n\n\t\tinfo.Cores = cpu.TotalThreads\n\t\tinfo.RAM = memory.String()\n\t\tinfo.Drives = block.String()\n\n\t\t// Encode and Return Struct as JSON\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tjson.NewEncoder(w).Encode(info)\n\n\t})\n\n\t// Start a Simple File Server on `/fs/`\n\tfs := http.FileServer(http.Dir(getPaths()))\n\thttp.Handle(\"/fs/\", http.StripPrefix(\"/fs/\", fs))\n\thttp.ListenAndServe(\"127.0.0.1:\"+controlPort, nil)\n}",
"func Server() {\n datastore.Start()\n router := gin.Default()\n web.ExposeRoutes(router)\n restAPI := router.Group(\"/api\")\n api.ExposeRoutes(restAPI)\n router.Run(fmt.Sprintf(\":%v\", config.GetConfig(\"PORT\")))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test getting server and client addresses | func TestAddresses(t *testing.T) {
// Create server
server, serverEvent := NewServer[any, any]()
assert(!server.Serving(), t, "Server should not be serving")
// Start server
err := server.Start("127.0.0.1", 0)
assertNoErr(err, t)
assert(server.Serving(), t, "Server should be serving")
time.Sleep(waitTime)
// Check server address info
host, port, err := server.GetAddr()
assertNoErr(err, t)
assert(server.sock.Addr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Server address: %s:%d\n", host, port)
// Create client
client, _ := NewClient[any, any]()
assert(!client.Connected(), t, "Client should not be connected")
// Connect to server
err = client.Connect(host, port)
assertNoErr(err, t)
assert(client.Connected(), t, "Client should be connected")
time.Sleep(waitTime)
// Check client address info
host, port, err = client.GetAddr()
assertNoErr(err, t)
assert(client.sock.LocalAddr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Client address: %s:%d\n", host, port)
// Check connect event was received
clientConnectEvent := <-serverEvent
assertEq(clientConnectEvent, ServerEvent[any]{
EventType: ServerConnect,
ClientID: 0,
}, t)
// Check that addresses match
host1, port1, err := client.GetAddr()
assertNoErr(err, t)
host2, port2, err := server.GetClientAddr(0)
assertNoErr(err, t)
assert(host1 == host2, t, "Client hosts do not match")
assert(port1 == port2, t, "Client ports do not match")
host3, port3, err := client.GetServerAddr()
assertNoErr(err, t)
host4, port4, err := server.GetAddr()
assertNoErr(err, t)
assert(host3 == host4, t, "Server hosts do not match")
assert(port3 == port4, t, "Server ports do not match")
// Disconnect from server
err = client.Disconnect()
assertNoErr(err, t)
time.Sleep(waitTime)
// Check disconnect event was received
clientDisconnectEvent := <-serverEvent
assertEq(clientDisconnectEvent, ServerEvent[any]{
EventType: ServerDisconnect,
ClientID: 0,
}, t)
// Stop server
err = server.Stop()
assertNoErr(err, t)
assert(!server.Serving(), t, "Server should not be serving")
time.Sleep(waitTime)
} | [
"func (f *FakeServer) GetAddr() (string, error) { return \"\", nil }",
"func TestMultipleClients(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[int, string]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient1, clientEvent1 := NewClient[string, int]()\n\tassert(!client1.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client1.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client1.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\tclientHost1, clientPort1, err := client1.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client1.sock.LocalAddr().String() == clientHost1+\":\"+strconv.Itoa(int(clientPort1)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", clientHost1, clientPort1)\n\n\t// Check connect event was received\n\tclientConnectEvent1 := <-serverEvent\n\tassertEq(clientConnectEvent1, ServerEvent[string]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Check that first client addresses match\n\thost1, port1, err := client1.GetAddr()\n\tassertNoErr(err, t)\n\thost2, port2, err := server.GetClientAddr(0)\n\tassertNoErr(err, t)\n\tassert(host1 == host2, t, \"Client 1 hosts do not match\")\n\tassert(port1 == port2, t, \"Client 1 ports do not match\")\n\thost3, port3, err := client1.GetServerAddr()\n\tassertNoErr(err, t)\n\thost4, port4, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(host3 == host4, t, \"Server hosts do not match\")\n\tassert(port3 == port4, t, \"Server ports do not match\")\n\n\t// Create client\n\tclient2, clientEvent2 := NewClient[string, int]()\n\tassert(!client2.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client2.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client2.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\tclientHost2, clientPort2, err := client2.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client2.sock.LocalAddr().String() == clientHost2+\":\"+strconv.Itoa(int(clientPort2)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", clientHost2, clientPort2)\n\n\t// Check connect event was received\n\tclientConnectEvent2 := <-serverEvent\n\tassertEq(clientConnectEvent2, ServerEvent[string]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 1,\n\t}, t)\n\n\t// Check that second client addresses match\n\thost5, port5, err := client2.GetAddr()\n\tassertNoErr(err, t)\n\thost6, port6, err := server.GetClientAddr(1)\n\tassertNoErr(err, t)\n\tassert(host5 == host6, t, \"Client 2 hosts do not match\")\n\tassert(port5 == port6, t, \"Client 2 ports do not match\")\n\thost7, port7, err := client2.GetServerAddr()\n\tassertNoErr(err, t)\n\thost8, port8, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(host7 == host8, t, \"Server hosts do not match\")\n\tassert(port7 == port8, t, \"Server ports do not match\")\n\n\t// Send message from client 1\n\tmessageFromClient1 := \"Hello from client 1\"\n\terr = client1.Send(messageFromClient1)\n\tassertNoErr(err, 
t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from client 1\n\tserverMessageFromClient1 := <-serverEvent\n\tassertEq(serverMessageFromClient1, ServerEvent[string]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 0,\n\t\tData: messageFromClient1,\n\t}, t)\n\n\t// Send response back to client 1\n\terr = server.Send(len(serverMessageFromClient1.Data), serverMessageFromClient1.ClientID)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Client 1 receive response\n\tserverReplyEvent1 := <-clientEvent1\n\tassertEq(serverReplyEvent1, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: len(messageFromClient1),\n\t}, t)\n\n\t// Send message from client 2\n\tmessageFromClient2 := \"Hello from client 2\"\n\terr = client2.Send(messageFromClient2)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from client 2\n\tserverMessageFromClient2 := <-serverEvent\n\tassertEq(serverMessageFromClient2, ServerEvent[string]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 1,\n\t\tData: messageFromClient2,\n\t}, t)\n\n\t// Send response back to client 2\n\terr = server.Send(len(serverMessageFromClient2.Data), serverMessageFromClient2.ClientID)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Client 2 receive response\n\tserverReplyEvent2 := <-clientEvent2\n\tassertEq(serverReplyEvent2, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: len(messageFromClient2),\n\t}, t)\n\n\t// Send message to all clients\n\tmessageFromServer := 29275\n\terr = server.Send(messageFromServer)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Client 1 receive message\n\tserverMessage1 := <-clientEvent1\n\tassertEq(serverMessage1, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: messageFromServer,\n\t}, t)\n\n\t// Client 2 receive message\n\tserverMessage2 := <-clientEvent2\n\tassertEq(serverMessage2, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: messageFromServer,\n\t}, t)\n\n\t// Client 1 disconnect from server\n\terr = client1.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check client 1 disconnect event was received\n\tclientDisconnectEvent1 := <-serverEvent\n\tassertEq(clientDisconnectEvent1, ServerEvent[string]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Client 2 disconnect from server\n\terr = client2.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check client 2 disconnect event was received\n\tclientDisconnectEvent2 := <-serverEvent\n\tassertEq(clientDisconnectEvent2, ServerEvent[string]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 1,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func TestGetAddress(t *testing.T){\n\tt.Logf(\"\\nChecking TestGetAddress\")\n\tfor _,elem := range testGetAddress {\n\t\tfor i, expected := range elem.expected {\n\t\t\toValue, _ := elem.adr.GetAddress(uint(i))\n\t\t\teValue := expected\n\n\t\t\tt.Logf(\"Checking %s spec(%s) --> (%x == %x)\", elem.adr.Name, elem.adr.Type, oValue, eValue)\n\n\t\t\tif oValue != eValue {\n\t\t\t\tt.Errorf(\"Object value %x is not expected %x\", oValue, eValue)\n\t\t\t}\n\t\t}\n\t}\n}",
"func TestClient_GetXpubAddresses(t *testing.T) {\n\t// Skip this test in short mode (not needed)\n\tif testing.Short() {\n\t\tt.Skip(\"skipping testing in short mode\")\n\t}\n\n\t// Create a new client object to handle your queries (supply an API Key)\n\tclient, err := NewClient(testAPIKey, NetworkMain, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar addresses XpubAddresses\n\txPub := \"xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU\"\n\toffset := 0 // testing with NO offset\n\tlimit := 0 // testing with NO limit\n\torder := \"\" // testing with NO order\n\tfilterByAddress := \"\" // testing with NO filter\n\taddresses, err = client.GetXpubAddresses(xPub, offset, limit, order, filterByAddress)\n\tif err != nil {\n\t\tt.Fatal(\"error occurred: \" + err.Error())\n\t}\n\n\tif len(addresses) == 0 {\n\t\tt.Fatal(\"we should have the some addresses\", addresses, xPub)\n\t}\n\n\t// Test Limit\n\tlimit = 5 // testing with limit\n\taddresses, err = client.GetXpubAddresses(xPub, offset, limit, order, filterByAddress)\n\tif err != nil {\n\t\tt.Fatal(\"error occurred: \" + err.Error())\n\t}\n\n\tif len(addresses) != 5 {\n\t\tt.Fatal(\"we should have 5 addresses\", addresses, xPub, limit)\n\t}\n}",
"func TestGetAddresses(t *testing.T) {\n\tuserJson := `{\"token\":\"` + token + `\"}`\n\treader = strings.NewReader(userJson)\n\treq, err := http.NewRequest(\"POST\", usersUrl, reader)\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif res.StatusCode != 200 {\n\t\tt.Errorf(\"Success expected: %d\", res.StatusCode)\n\t}\n}",
"func getTestAddress(t *testing.T) string {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to find an available port: %s\", err)\n\t}\n\tdefer listener.Close()\n\treturn listener.Addr().String()\n}",
"func TestClient_ListReservedPublicIPAddresses_Success(test *testing.T) {\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {\n\t\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\t\twriter.WriteHeader(http.StatusOK)\n\n\t\tfmt.Fprintln(writer, listReservedPublicIPAddressesResponse)\n\t}))\n\tdefer testServer.Close()\n\n\tclient := NewClientWithBaseAddress(testServer.URL, \"user1\", \"password\")\n\tclient.setAccount(&Account{\n\t\tOrganizationID: \"dummy-organization-id\",\n\t})\n\n\tnetworkDomains, err := client.ListReservedPublicIPAddresses(\"802abc9f-45a7-4efb-9d5a-810082368708\", nil)\n\tif err != nil {\n\t\ttest.Fatal(err)\n\t}\n\n\tverifyListReservedPublicIPAddressesResponse(test, networkDomains)\n}",
"func TestTwoAddresses(t *testing.T) {\n\t// Check that returning more than 2 addresses causes a host to be filtered.\n\tfilter := newProductionFilter(testTwoAddressesResolver{})\n\n\t// Create a few hosts for testing.\n\thostValid1 := newHostFromAddress(\"ipv4.ipv6\")\n\thostValid2 := newHostFromAddress(\"ipv6.ipv4\")\n\thostInvalid1 := newHostFromAddress(\"ipv4.ipv4\")\n\thostInvalid2 := newHostFromAddress(\"ipv6.ipv6\")\n\n\t// Check hosts.\n\tif filter.Filtered(hostValid1) || filter.Filtered(hostValid2) {\n\t\tt.Fatal(\"Valid hosts were filtered.\")\n\t}\n\tif !filter.Filtered(hostInvalid1) || !filter.Filtered(hostInvalid2) {\n\t\tt.Fatal(\"Invalid hosts weren't filtered.\")\n\t}\n}",
"func TestInitAddrs(t *testing.T) {\n\tnb := NewBroker()\n\n\taddr1, addr2 := \"192.168.10.1:5222\", \"10.20.10.0:4222\"\n\n\tnb.Init(broker.Addrs(addr1, addr2))\n\n\tif len(nb.Options().Addrs) != 2 {\n\t\tt.Errorf(\"Expected Addr count = 2, Actual Addr count = %d\", len(nb.Options().Addrs))\n\t}\n\n\tnatsBroker, ok := nb.(*nbroker)\n\tif !ok {\n\t\tt.Fatal(\"Expected broker to be of types *nbroker\")\n\t}\n\n\taddr1f := fmt.Sprintf(\"nats://%s\", addr1)\n\taddr2f := fmt.Sprintf(\"nats://%s\", addr2)\n\n\tif natsBroker.addrs[0] != addr1f && natsBroker.addrs[1] != addr2f {\n\t\texpAddr, actAddr := fmt.Sprintf(\"%s,%s\", addr1f, addr2f), fmt.Sprintf(\"%s,%s\", natsBroker.addrs[0], natsBroker.addrs[1])\n\t\tt.Errorf(\"Expected = '%s', Actual = '%s'\", expAddr, actAddr)\n\t}\n\n}",
"func (s *Server) Test() (ok bool, err error) {\n\ttests := []dns.Question{\n\t\t{dns.Fqdn(\"google.com\"), dns.TypeA, dns.ClassINET},\n\t\t{dns.Fqdn(\"facebook.com\"), dns.TypeA, dns.ClassINET},\n\t\t{dns.Fqdn(\"amazon.com\"), dns.TypeA, dns.ClassINET},\n\t}\n\n\taddr := s.IP + \":53\"\n\tc := new(dns.Client)\n\tvar lastErr error\n\n\tfor _, q := range tests {\n\t\tmsg := new(dns.Msg)\n\t\tmsg.Id = dns.Id()\n\t\tmsg.RecursionDesired = true\n\t\tmsg.Question = make([]dns.Question, 1)\n\t\tmsg.Question[0] = q\n\n\t\tresp, _, err := c.Exchange(msg, addr)\n\t\tif err != nil {\n\t\t\tif err, ok := err.(net.Error); ok && err.Timeout() {\n\t\t\t\t// instant fail\n\t\t\t\treturn false, errors.New(\"TIMEOUT\")\n\t\t\t}\n\t\t\tif lastErr != nil && err.Error() == lastErr.Error() {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp == nil {\n\t\t\terr = errors.New(\"server did not return a result\")\n\t\t\tif lastErr != nil && err.Error() == lastErr.Error() {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn true, nil\n}",
"func testableListenArgs(network, address, client string) bool {\n\tif !testableNetwork(network) || !testableAddress(network, address) {\n\t\treturn false\n\t}\n\n\tvar err error\n\tvar addr net.Addr\n\tswitch ss := strings.Split(network, \":\"); ss[0] {\n\tcase \"srt\", \"srt4\", \"srt6\":\n\t\taddr, err = ResolveSRTAddr(\"srt\", address)\n\tdefault:\n\t\treturn true\n\t}\n\tif err != nil {\n\t\treturn false\n\t}\n\tvar ip net.IP\n\tvar wildcard bool\n\tswitch addr := addr.(type) {\n\tcase *SRTAddr:\n\t\tip = addr.IP\n\t\twildcard = addr.isWildcard()\n\t}\n\n\t// Test wildcard IP addresses.\n\tif wildcard && !testenv.HasExternalNetwork() {\n\t\treturn false\n\t}\n\n\t// Test functionality of IPv4 communication using AF_INET and\n\t// IPv6 communication using AF_INET6 sockets.\n\tif !supportsIPv4() && ip.To4() != nil {\n\t\treturn false\n\t}\n\tif !supportsIPv6() && ip.To16() != nil && ip.To4() == nil {\n\t\treturn false\n\t}\n\tcip := net.ParseIP(client)\n\tif cip != nil {\n\t\tif !supportsIPv4() && cip.To4() != nil {\n\t\t\treturn false\n\t\t}\n\t\tif !supportsIPv6() && cip.To16() != nil && cip.To4() == nil {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Test functionality of IPv4 communication using AF_INET6\n\t// sockets.\n\tif !supportsIPv4map() && supportsIPv4() && (network == \"srt\") && wildcard {\n\t\t// At this point, we prefer IPv4 when ip is nil.\n\t\t// See favoriteAddrFamily for further information.\n\t\tif ip.To16() != nil && ip.To4() == nil && cip.To4() != nil { // a pair of IPv6 server and IPv4 client\n\t\t\treturn false\n\t\t}\n\t\tif (ip.To4() != nil || ip == nil) && cip.To16() != nil && cip.To4() == nil { // a pair of IPv4 server and IPv6 client\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func getServerAddr() string {\n\taddr := getCommandLineOptValue(\"-address\")\n\tif addr != \"\" {\n\t\treturn addr\n\t}\n\treturn defaultServerAddr\n}",
"func UA_Client_findServers(client []UA_Client, serverUrl []byte, serverUrisSize uint, serverUris []UA_String, localeIdsSize uint, localeIds []UA_String, registeredServersSize []uint, registeredServers [][]UA_ApplicationDescription) UA_StatusCode {\n\tvar connected UA_Boolean = UA_Boolean((int((map[bool]int{false: 0, true: 1}[uint32(client[0].state) > uint32(UA_CLIENTSTATE_DISCONNECTED)]))))\n\tif int((int((connected)))) != 0 && strncmp(client[0].endpointUrl.data, serverUrl, uint32((uint(client[0].endpointUrl.length)))) != 0 {\n\t\t// Client is already connected to a different server\n\t\treturn UA_StatusCode((uint32_t((uint32((uint32(2158690304)))))))\n\t}\n\tif int((int((noarch.NotUA_Boolean(UA_Boolean(connected)))))) != 0 {\n\t\tvar retval UA_StatusCode = UA_Client_connectInternal(client, serverUrl, 1, 0)\n\t\tif retval != UA_StatusCode((uint32_t((uint32((0)))))) {\n\t\t\treturn UA_StatusCode(retval)\n\t\t}\n\t}\n\tvar request UA_FindServersRequest\n\t// Prepare the request\n\tUA_FindServersRequest_init((*[100000000]UA_FindServersRequest)(unsafe.Pointer(&request))[:])\n\trequest.serverUrisSize = serverUrisSize\n\trequest.serverUris = serverUris\n\trequest.localeIdsSize = localeIdsSize\n\trequest.localeIds = localeIds\n\tvar response UA_FindServersResponse\n\t// Send the request\n\t__UA_Client_Service(client, (*[100000000]UA_FindServersRequest)(unsafe.Pointer(&request))[:], (*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[145]))[:], (*[100000000]UA_FindServersResponse)(unsafe.Pointer(&response))[:], (*[100000000]UA_DataType)(unsafe.Pointer(&UA_TYPES[194]))[:])\n\tvar retval UA_StatusCode = UA_StatusCode(response.responseHeader.serviceResult)\n\tif retval == UA_StatusCode((uint32_t((uint32((0)))))) {\n\t\t// Process the response\n\t\tregisteredServersSize[0] = uint(response.serversSize)\n\t\tregisteredServers[0] = response.servers\n\t\tresponse.serversSize = 0\n\t\tresponse.servers = nil\n\t} else {\n\t\tregisteredServersSize[0] = 0\n\t\tregisteredServers[0] = nil\n\t}\n\t// Clean up\n\tUA_FindServersResponse_deleteMembers((*[100000000]UA_FindServersResponse)(unsafe.Pointer(&response))[:])\n\tif int((int((noarch.NotUA_Boolean(UA_Boolean(connected)))))) != 0 {\n\t\tUA_Client_disconnect(client)\n\t}\n\treturn UA_StatusCode(retval)\n}",
"func testableAddress(network, address string) bool {\n\treturn true\n}",
"func populateConfigServerAddress() {\n\t//config server Address , higher priority for environment variable\n\tconfigServerAddrFromEnv := readEndpoint(common.EnvCCEndpoint)\n\tif configServerAddrFromEnv != \"\" {\n\t\tGlobalDefinition.ServiceComb.Config.Client.ServerURI = configServerAddrFromEnv\n\t}\n}",
"func TestIpAddressParsing(t *testing.T) {\n\tclientObject := client.New()\n\tcorrectIP := \"127.0.0.1\"\n\twrongIP := \"12345\"\n\tipAddressOutOfRange := \"620.10.42.201\"\n\tcharacterIP := \"Zehn.Zehn.Zehn.Hundert\"\n\tif error := clientObject.SetIpAddressAsString(correctIP); error != nil {\n\t\tt.Error(\"A ip address of \" + correctIP + \" must be correct.\")\n\t}\n\tif error := clientObject.SetIpAddressAsString(wrongIP); error == nil {\n\t\tt.Error(\"A ip address of \" + wrongIP + \" should not be parsed, because it is a simple number.\")\n\t}\n\tif error := clientObject.SetIpAddressAsString(ipAddressOutOfRange); error == nil {\n\t\tt.Error(\"A ip address of \" + ipAddressOutOfRange + \" should not be parsed, because it is out of the ip address range.\")\n\t}\n\tif error := clientObject.SetIpAddressAsString(characterIP); error == nil {\n\t\tt.Error(\"A ip address of \" + characterIP + \" should not be parsed, because it is a orinary string (word).\")\n\t}\n}",
"func (s *LrpcTestSuite) TestClientInfo() {\n\tt := s.T()\n\n\tt.Log(\"Test get caller information\")\n\n\tcl := s.setupClient()\n\tdefer cl.Close()\n\n\tres, err := DoRequest(cl, MsgInfo, nil)\n\ts.Assert().NoError(err, \"Request should be sent with no error\")\n\n\ts.Assert().GreaterOrEqualf(len(res), 2, \"Result should have at least 2 elements\")\n\n\tstatus, ok := res[0].(bool)\n\ts.Assert().Truef(ok, \"First response must be boolean\")\n\ts.Assert().Truef(status, \"First response must be true\")\n\tif !status {\n\t\terrmsg, _ := res[1].(string)\n\t\ts.Failf(\"Server return error: [%s]\\n\", errmsg)\n\t} else {\n\t\tt.Logf(\"Returned result: %v\", res)\n\n\t\tisPriv, ok := res[1].(bool)\n\t\ts.Assert().Truef(ok, \"Second response value must be boolean\")\n\t\trunAsPriv, err := utils.RunByPrivilegedUser()\n\t\ts.Assert().NoError(err, \"Should get information about whether current process is run by privileged user\")\n\t\ts.Assert().Equalf(runAsPriv, isPriv, \"privilege user information incorrect\")\n\n\t\t// check process ID\n\t\tpid, ok := res[2].(int32)\n\t\ts.Assert().Truef(ok, \"Returned PID should be int32\")\n\t\ts.Assert().Equalf(int(pid), os.Getpid(), \"Process ID should be the same\")\n\n\t\t// check program name\n\t\ts.Assert().Equalf(len(res), 4, \"Good result should have 4 elements\")\n\t\tif len(res) >= 4 {\n\t\t\tname, ok := res[3].(string)\n\t\t\ts.Assert().True(ok, \"Program name should be a string\")\n\t\t\ts.Assert().Containsf(name, \".test\", \"Program name should contain .test\")\n\t\t}\n\t}\n}",
"func getListenIPs(httpServerConf *http.Server) (hosts []string, port string) {\n\thost, port, err := net.SplitHostPort(httpServerConf.Addr)\n\thelper.FatalIf(err, \"Unable to parse host port.\")\n\n\tswitch {\n\tcase host != \"\":\n\t\thosts = append(hosts, host)\n\tdefault:\n\t\taddrs, err := net.InterfaceAddrs()\n\t\thelper.FatalIf(err, \"Unable to determine network interface address.\")\n\t\tfor _, addr := range addrs {\n\t\t\tif addr.Network() == \"ip+net\" {\n\t\t\t\thost := strings.Split(addr.String(), \"/\")[0]\n\t\t\t\tif ip := net.ParseIP(host); ip.To4() != nil {\n\t\t\t\t\thosts = append(hosts, host)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn hosts, port\n}",
"func createTestingConns() (clientConn, serverConn net.Conn) {\n\tln, _ := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tserverConn, _ = ln.Accept()\n\t\twg.Done()\n\t}()\n\tclientConn, _ = net.Dial(\"tcp\", ln.Addr().String())\n\twg.Wait()\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test sending messages between server and client | func TestSend(t *testing.T) {
	// Create server
	server, serverEvent := NewServer[int, string]()
	assert(!server.Serving(), t, "Server should not be serving")

	// Start server
	err := server.Start("127.0.0.1", 0)
	assertNoErr(err, t)
	assert(server.Serving(), t, "Server should be serving")
	time.Sleep(waitTime)

	// Check server address info
	host, port, err := server.GetAddr()
	assertNoErr(err, t)
	assert(server.sock.Addr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
	fmt.Printf("Server address: %s:%d\n", host, port)

	// Create client
	client, clientEvent := NewClient[string, int]()
	assert(!client.Connected(), t, "Client should not be connected")

	// Connect to server
	err = client.Connect(host, port)
	assertNoErr(err, t)
	assert(client.Connected(), t, "Client should be connected")
	time.Sleep(waitTime)

	// Check client address info
	host, port, err = client.GetAddr()
	assertNoErr(err, t)
	assert(client.sock.LocalAddr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
	fmt.Printf("Client address: %s:%d\n", host, port)

	// Check connect event was received
	clientConnectEvent := <-serverEvent
	assertEq(clientConnectEvent, ServerEvent[string]{
		EventType: ServerConnect,
		ClientID: 0,
	}, t)

	// Send message to client
	messageFromServer := 29275
	err = server.Send(messageFromServer)
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Receive message from server
	clientReceiveEvent1 := <-clientEvent
	assertEq(clientReceiveEvent1, ClientEvent[int]{
		EventType: ClientReceive,
		Data: messageFromServer,
	}, t)
	time.Sleep(waitTime)

	// Send message to server
	messageFromClient := "Hello, server!"
	err = client.Send(messageFromClient)
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Receive message from client
	serverReceiveEvent := <-serverEvent
	assertEq(serverReceiveEvent, ServerEvent[string]{
		EventType: ServerReceive,
		ClientID: 0,
		Data: messageFromClient,
	}, t)
	time.Sleep(waitTime)

	// Send response to client
	err = server.Send(len(serverReceiveEvent.Data))
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Receive response from server
	clientReceiveEvent2 := <-clientEvent
	assertEq(clientReceiveEvent2, ClientEvent[int]{
		EventType: ClientReceive,
		Data: len(messageFromClient),
	}, t)
	time.Sleep(waitTime)

	// Disconnect from server
	err = client.Disconnect()
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Check disconnect event was received
	clientDisconnectEvent := <-serverEvent
	assertEq(clientDisconnectEvent, ServerEvent[string]{
		EventType: ServerDisconnect,
		ClientID: 0,
	}, t)

	// Stop server
	err = server.Stop()
	assertNoErr(err, t)
	assert(!server.Serving(), t, "Server should not be serving")
	time.Sleep(waitTime)
} | [
"func TestSendingNumerousMessages(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[int, int]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[int, int]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[int]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Generate messages\n\tnumServerMessages := (rand.Int() % 64) + 64\n\tnumClientMessages := (rand.Int() % 128) + 128\n\tserverMessages := make([]int, numServerMessages)\n\tclientMessages := make([]int, numClientMessages)\n\tfor i := 0; i < numServerMessages; i++ {\n\t\tserverMessages = append(serverMessages, rand.Int()%1024)\n\t}\n\tfor i := 0; i < numClientMessages; i++ {\n\t\tclientMessages = append(clientMessages, rand.Int()%1024)\n\t}\n\tfmt.Printf(\"Generated %d server messages\\n\", numServerMessages)\n\tfmt.Printf(\"Generated %d client messages\\n\", numClientMessages)\n\n\t// Send messages\n\tfor _, serverMessage := range serverMessages {\n\t\terr := client.Send(serverMessage)\n\t\tassertNoErr(err, t)\n\t}\n\tfor _, clientMessage := range clientMessages {\n\t\terr := server.Send(clientMessage)\n\t\tassertNoErr(err, t)\n\t}\n\ttime.Sleep(waitTime)\n\n\t// Receive messages from client\n\tfor _, serverMessage := range serverMessages {\n\t\tserverReceiveEvent := <-serverEvent\n\t\tassertEq(serverReceiveEvent, ServerEvent[int]{\n\t\t\tEventType: ServerReceive,\n\t\t\tClientID: 0,\n\t\t\tData: serverMessage,\n\t\t}, t)\n\t}\n\n\t// Receive messages from server\n\tfor _, clientMessage := range clientMessages {\n\t\tclientReceiveEvent := <-clientEvent\n\t\tassertEq(clientReceiveEvent, ClientEvent[int]{\n\t\t\tEventType: ClientReceive,\n\t\t\tData: clientMessage,\n\t\t}, t)\n\t}\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[int]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func TestSender(t *testing.T) {\n\t// Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t// Prepare the messages we're going to send.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":[email protected] QUIT :Gone to have lunch\"),\n\t}\n\n\t// Start the goroutine.\n\tgo Sender(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\tdataChan <- msg\n\t\tconn.SetBuffer <- msg\n\n\t\t// Sleep to give the other goroutine some time to make the copy. This slows the tests,\n\t\t// but we just have to accept that.\n\t\ttime.Sleep(200)\n\n\t\tif cmp := bytes.Compare(conn.Buffer, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to write correctly: expected %v, got %v.\",\n\t\t\t\tmsg,\n\t\t\t\tconn.Buffer)\n\t\t}\n\t}\n\n\t// Close the channel.\n\tclose(dataChan)\n}",
"func testSend(t *testing.T, tun Tunnel, wait chan bool, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\t// wait for the listener to get ready\n\t<-wait\n\n\t// dial a new session\n\tc, err := tun.Dial(\"test-tunnel\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tm := transport.Message{\n\t\tHeader: map[string]string{\n\t\t\t\"test\": \"send\",\n\t\t},\n\t}\n\n\t// send the message\n\tif err := c.Send(&m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// now wait for the response\n\tmr := new(transport.Message)\n\tif err := c.Recv(mr); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t<-wait\n\n\tif v := mr.Header[\"test\"]; v != \"accept\" {\n\t\tt.Fatalf(\"Message not received from accepted side. Received: %s\", v)\n\t}\n}",
"func TestOneWaySend(t *testing.T) {\n\tfmt.Println(\"Starting one way send test. Be patient, the test may run for several minutes\")\n\tfmt.Println(\"One way send test case launches two servers and sends 10k messages from one \")\n\tfmt.Println(\"server to the other. Test checks that exactly one copy of every message is received\")\n\tconf := Config{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\"}\n\n\t// launch proxy server\n\tgo acceptClusterMember(9999)\n\tgo sendClusterMembers(9009)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsender, err := NewWithConfig(1, \"127.0.0.1\", 5011, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\treceiver, err := NewWithConfig(2, \"127.0.0.1\", 5012, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\tdone := make(chan bool, 1)\n\tcount := 10000\n\n\trecord := make([]uint32, count)\n\n\tgo receive(receiver.Inbox(), record, count, 2, done)\n\tgo sendMessages(sender.Outbox(), count, 2, 1)\n\tselect {\n\tcase <-done:\n\t\tfmt.Println(\"TestOneWaySend passed successfully\")\n\t\tbreak\n\tcase <-time.After(5 * time.Minute):\n\t\tt.Errorf(\"Could not send \", strconv.Itoa(count), \" messages in 5 minute\")\n\t\tbreak\n\t}\n}",
"func TestClientSend(t *testing.T) {\n\tsrc := newClient(\"abc\", nil)\n\tdest := newClient(\"def\", nil)\n\n\t// The message should be queued since dest has not registered.\n\tm := \"hello\"\n\tif err := src.send(dest, m); err != nil {\n\t\tt.Errorf(\"When dest is not registered, src.send(dest, %q) got error: %s, want nil\", m, err.Error())\n\t}\n\tif len(src.msgs) != 1 || src.msgs[0] != m {\n\t\tt.Errorf(\"After src.send(dest, %q) when dest is not registered, src.msgs = %v, want [%q]\", m, src.msgs, m)\n\t}\n\n\trwc := collidertest.MockReadWriteCloser{Closed: false}\n\tdest.register(&rwc)\n\n\t// The message should be sent this time.\n\tm2 := \"hi\"\n\tsrc.send(dest, m2)\n\n\tif rwc.Msg == \"\" {\n\t\tt.Errorf(\"When dest is registered, after src.send(dest, %q), dest.rwc.Msg = %v, want %q\", m2, rwc.Msg, m2)\n\t}\n\tif len(src.msgs) != 1 || src.msgs[0] != m {\n\t\tt.Errorf(\"When dest is registered, after src.send(dest, %q), src.msgs = %v, want [%q]\", m2, src.msgs, m)\n\t}\n}",
"func TestLargeSend(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[[]byte, []byte]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[[]byte, []byte]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[[]byte]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Generate large messages\n\tlargeMessageFromServerLength := rand.Int() % 65536\n\tlargeMessageFromServer := make([]byte, largeMessageFromServerLength)\n\tn, err := rand.Read(largeMessageFromServer)\n\tassertNoErr(err, t)\n\tassertEq(n, largeMessageFromServerLength, t)\n\tfmt.Printf(\"Generated large message from server (%d bytes)\\n\", largeMessageFromServerLength)\n\tlargeMessageFromClientLength := rand.Int() % 32768\n\tlargeMessageFromClient := make([]byte, largeMessageFromClientLength)\n\tn, err = rand.Read(largeMessageFromClient)\n\tassertNoErr(err, t)\n\tassertEq(n, largeMessageFromClientLength, t)\n\tfmt.Printf(\"Generated large message from client (%d bytes)\\n\", largeMessageFromClientLength)\n\n\t// Send large message to client\n\terr = server.Send(largeMessageFromServer)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive large message from server\n\tdataFromServer := <-clientEvent\n\tassertEq(dataFromServer, ClientEvent[[]byte]{\n\t\tEventType: ClientReceive,\n\t\tData: largeMessageFromServer,\n\t}, t)\n\n\t// Send large message to server\n\terr = client.Send(largeMessageFromClient)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive large message from client\n\tdataFromClient := <-serverEvent\n\tassertEq(dataFromClient, ServerEvent[[]byte]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 0,\n\t\tData: largeMessageFromClient,\n\t}, t)\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[[]byte]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func TestListenHandleSend(t *testing.T) {\n\tfmt.Println(\"\\n----------------TestListen----------------\")\n\n\tconn, err := net.Dial(\"tcp\", \"127.0.0.1:9000\")\n\n\tif err != nil {\n\t\tt.Error(err.Error())\n\t} else {\n\t\tfmt.Println(\"Successfully Connected To Tracker\")\n\t\tsuccessful++\n\t}\n\n\tfmt.Println(\"\\n----------------TestHandleRequest----------------\")\n\n\tfmt.Fprintf(conn, \"lol:fake.txt\\n\")\n\n\treply, err := bufio.NewReader(conn).ReadString('\\n') // Waits for a String ending in newline\n\treply = strings.TrimSpace(reply)\n\n\tif err != nil {\n\t\tfmt.Println(\"Successfully Handled Invalid Request\")\n\t\tsuccessful++\n\t} else {\n\t\tt.Error(\"Test failed, expected to have server respond 'NO'. Got\", reply)\n\t}\n\tconn.Close()\n\n\tconn, err = net.Dial(\"tcp\", \"127.0.0.1:9000\")\n\n\tfmt.Fprintf(conn, \"Swarm_Request:111.111.111.111:0000:Tests\\n\")\n\n\treply, err = bufio.NewReader(conn).ReadString('\\n') // Waits for a String ending in newline\n\treply = strings.TrimSpace(reply)\n\tcontent, _ := ioutil.ReadFile(sPath)\n\ts := string(content)\n\n\tif strings.Contains(s, reply) {\n\t\tfmt.Println(\"Successfully Handled Valid Request\")\n\t\tsuccessful++\n\t} else {\n\t\tt.Error(\"Test failed, expected to receive swarm.info. Got\", reply)\n\t}\n\n\tconn.Close()\n\n\tfmt.Println(\"\\n----------------TestSendFile----------------\")\n\n\tconn, err = net.Dial(\"tcp\", \"127.0.0.1:9000\")\n\n\tfmt.Fprintf(conn, \"Meta_Request:111.111.111.111:0000:Tests\\n\")\n\n\treply, err = bufio.NewReader(conn).ReadString('\\n') // Waits for a String ending in newline\n\treply = strings.TrimSpace(reply)\n\tcontent, _ = ioutil.ReadFile(mPath)\n\ts = string(content)\n\n\tif strings.Contains(s, reply) {\n\t\tfmt.Println(\"Successfully Sent A File\")\n\t\tsuccessful++\n\t} else {\n\t\tt.Error(\"Test failed, expected to receive first line of meta.info. Got\", reply)\n\t}\n\n}",
"func TestRPCSendMessage(t *testing.T) {\n\t// create RPCServer\n\trpcs, err := NewRPCServer(9987)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to initialize TCPServer:\", err)\n\t}\n\tdefer rpcs.Close()\n\n\t// add a message handler to the server\n\ttsh := new(TestStoreHandler)\n\tid := rpcs.RegisterHandler(tsh)\n\n\t// send a message\n\tm := &Message{\n\t\tAddress{id, \"localhost\", 9987},\n\t\t\"TestStoreHandler.StoreMessage\",\n\t\t\"hello, world!\",\n\t\tnil,\n\t}\n\terr = rpcs.SendMessage(m)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to send message:\", err)\n\t}\n\n\tif tsh.message != \"hello, world!\" {\n\t\tt.Fatal(\"Bad response: expected \\\"hello, world!\\\", got \\\"\" + tsh.message + \"\\\"\")\n\t}\n\n\t// send a message asynchronously\n\ttsh.message = \"\"\n\terrChan := rpcs.SendAsyncMessage(m)\n\terr = <-errChan\n\tif err != nil {\n\t\tt.Fatal(\"Failed to send message:\", err)\n\t}\n\n\tif tsh.message != \"hello, world!\" {\n\t\tt.Fatal(\"Bad response: expected \\\"hello, world!\\\", got \\\"\" + tsh.message + \"\\\"\")\n\t}\n}",
"func TestTransportSend(t *testing.T) {\n\tpeer1 := newFakePeer()\n\tpeer2 := newFakePeer()\n\ttr := &Transport{\n\t\tServerStats: stats.NewServerStats(\"\", \"\"),\n\t\tpeers: map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2},\n\t}\n\twmsgsIgnored := []raftpb.Message{\n\t\t// bad local message\n\t\t{Type: raftpb.MsgBeat},\n\t\t// bad remote message\n\t\t{Type: raftpb.MsgProp, To: 3},\n\t}\n\twmsgsTo1 := []raftpb.Message{\n\t\t// good message\n\t\t{Type: raftpb.MsgProp, To: 1},\n\t\t{Type: raftpb.MsgApp, To: 1},\n\t}\n\twmsgsTo2 := []raftpb.Message{\n\t\t// good message\n\t\t{Type: raftpb.MsgProp, To: 2},\n\t\t{Type: raftpb.MsgApp, To: 2},\n\t}\n\ttr.Send(wmsgsIgnored)\n\ttr.Send(wmsgsTo1)\n\ttr.Send(wmsgsTo2)\n\n\tif !reflect.DeepEqual(peer1.msgs, wmsgsTo1) {\n\t\tt.Errorf(\"msgs to peer 1 = %+v, want %+v\", peer1.msgs, wmsgsTo1)\n\t}\n\tif !reflect.DeepEqual(peer2.msgs, wmsgsTo2) {\n\t\tt.Errorf(\"msgs to peer 2 = %+v, want %+v\", peer2.msgs, wmsgsTo2)\n\t}\n}",
"func Test2ClientChatUsingMultipleClients(t *testing.T) {\n\n\t//code to create random string for the datapoint\n\tconst charset = \"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\tconst numclients = 100\n\tconst datapoints = 2\n\n\tvar seededRand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\n\tstringmaker := func(length int, charset string) string {\n\t\tb := make([]byte, length)\n\t\tfor i := range b {\n\t\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t\t}\n\t\treturn string(b)\n\t}\n\tgetstring := func(length int) string {\n\t\treturn stringmaker(length, charset)\n\t}\n\n\tstart := time.Now()\n\tdata := make([]string, datapoints)\n\tdone := make(chan bool, numclients)\n\t//start all the clients and pass the two msgs to each of them to broadcast\n\tfor i := 0; i < numclients; i++ {\n\t\tfor j := 0; j < datapoints; j++ {\n\t\t\tdata[j] = getstring(10)\n\t\t}\n\t\tgo startClient(data, done)\n\t}\n\t//wait for all the clients to finish\n\tcount := 0\n\tfor i := 1; i <= numclients; i++ {\n\t\t<-done\n\t\tcount = count + 1\n\t}\n\tassertEquals(t, count, numclients)\n\telapsed := time.Since(start)\n\t//measure the elapsed time\n\tfmt.Println(elapsed)\n}",
"func TestSendingCustomTypes(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[custom, custom]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[custom, custom]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[custom]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Messages\n\tserverMessage := custom{\n\t\tA: 123,\n\t\tB: \"Hello, custom server class!\",\n\t\tC: []string{\"first server item\", \"second server item\"},\n\t}\n\tclientMessage := custom{\n\t\tA: 456,\n\t\tB: \"Hello, custom client class!\",\n\t\tC: []string{\"#1 client item\", \"client item #2\", \"(3) client item\"},\n\t}\n\n\t// Send message to client\n\terr = server.Send(clientMessage)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from server\n\tclientReceiveEvent1 := <-clientEvent\n\tassertEq(clientReceiveEvent1, ClientEvent[custom]{\n\t\tEventType: ClientReceive,\n\t\tData: clientMessage,\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Send message to server\n\terr = client.Send(serverMessage)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from client\n\tserverReceiveEvent := <-serverEvent\n\tassertEq(serverReceiveEvent, ServerEvent[custom]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 0,\n\t\tData: serverMessage,\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[custom]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func TestSendRecv(t *testing.T) {\n\t// Create the sender.\n\tc := codec.NewGoGoProtobufCodec()\n\tassert.NotNil(t, c)\n\ttr := transporter.NewHTTPTransporter(\"localhost:8008\")\n\n\t// Should fail to create the messenger.\n\tassert.Nil(t, New(c, tr, false, false))\n\tm := New(c, tr, true, true)\n\tassert.NotNil(t, m)\n\n\tassert.NoError(t, m.RegisterMessage(&example.GoGoProtobufTestMessage1{}))\n\tassert.NoError(t, m.RegisterMessage(&example.GoGoProtobufTestMessage2{}))\n\tassert.NoError(t, m.RegisterMessage(&example.GoGoProtobufTestMessage3{}))\n\tassert.NoError(t, m.RegisterMessage(&example.GoGoProtobufTestMessage4{}))\n\n\tassert.NoError(t, m.RegisterHandler(&example.GoGoProtobufTestMessage1{}, handler1))\n\tassert.NoError(t, m.RegisterHandler(&example.GoGoProtobufTestMessage2{}, handler2))\n\tassert.NoError(t, m.RegisterHandler(&example.GoGoProtobufTestMessage3{}, handler3))\n\tassert.NoError(t, m.RegisterHandler(&example.GoGoProtobufTestMessage4{}, handler4))\n\n\t// Create the echo server.\n\tc = codec.NewGoGoProtobufCodec()\n\tassert.NotNil(t, c)\n\ttr = transporter.NewHTTPTransporter(\"localhost:8009\")\n\n\tn := New(c, tr, false, true)\n\tassert.NotNil(t, n)\n\n\te := &echoServer{\n\t\tm: n,\n\t\tpeerAddr: \"localhost:8008\",\n\t}\n\n\tassert.NoError(t, n.RegisterMessage(&example.GoGoProtobufTestMessage1{}))\n\tassert.NoError(t, n.RegisterMessage(&example.GoGoProtobufTestMessage2{}))\n\tassert.NoError(t, n.RegisterMessage(&example.GoGoProtobufTestMessage3{}))\n\tassert.NoError(t, n.RegisterMessage(&example.GoGoProtobufTestMessage4{}))\n\n\tassert.NoError(t, n.RegisterHandler(&example.GoGoProtobufTestMessage1{}, e.msgHandler))\n\tassert.NoError(t, n.RegisterHandler(&example.GoGoProtobufTestMessage2{}, e.msgHandler))\n\tassert.NoError(t, n.RegisterHandler(&example.GoGoProtobufTestMessage3{}, e.msgHandler))\n\tassert.NoError(t, n.RegisterHandler(&example.GoGoProtobufTestMessage4{}, e.msgHandler))\n\n\tassert.NoError(t, m.Start())\n\tassert.NoError(t, n.Start())\n\n\tcnt := 10\n\tmessages := generateMessages(cnt)\n\n\tgo func() {\n\t\tfor i := range messages {\n\t\t\tm.Send(\"localhost:8009\", messages[i])\n\t\t}\n\t}()\n\n\tvar recvMessages []interface{}\n\n\twait := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tmsg, err := m.Recv()\n\t\t\tassert.NoError(t, err)\n\n\t\t\trecvMessages = append(recvMessages, msg)\n\t\t}\n\t}()\n\t<-time.After(time.Second * 5)\n\n\tfor i := range messages {\n\t\tassert.Equal(t, messages[i], recvMessages[i])\n\n\t}\n\n\t// Verify that the handlers are called.\n\tassert.Equal(t, cnt, count1)\n\tassert.Equal(t, cnt, count2)\n\tassert.Equal(t, cnt, count3)\n\tassert.Equal(t, cnt, count4)\n\n\tassert.NoError(t, m.Stop())\n\tassert.NoError(t, n.Stop())\n\n\tassert.NoError(t, m.Destroy())\n\tassert.NoError(t, n.Destroy())\n}",
"func _TestHugeMessages(t *testing.T) {\n\tfmt.Println(\"Starting large message send test. Be patient, the test may run for several minutes\")\n\tfmt.Println(\"TestHugeMessages sends 1k messages each of 10^7 bytes in length\")\n\tconf := Config{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\"}\n\n\t// launch proxy server\n\tgo acceptClusterMember(9999)\n\tgo sendClusterMembers(9009)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsender, err := NewWithConfig(1, \"127.0.0.1\", 5021, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\treceiver, err := NewWithConfig(2, \"127.0.0.1\", 5022, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\terrorChannel := make(chan error, 1)\n\tcount := 1000\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i < messageSize; i += 1 {\n\t\tbuf.WriteString(\"1\")\n\t}\n\tdone := make(chan bool, 1)\n\tgo sendHugeMessages(sender.Outbox(), errorChannel, buf.String(), count, 2, 1)\n\tgo recvHugeMessages(receiver.Inbox(), errorChannel, count, done)\n\tselect {\n\tcase <-done:\n\t\tfmt.Println(\"TestOneWaySend passed successfully\")\n\t\tbreak\n\tcase err := <-errorChannel:\n\t\tt.Errorf(\"Error in sending large message.\\n\" + err.Error())\n\tcase <-time.After(5 * time.Minute):\n\t\tt.Errorf(\"Could not send \", strconv.Itoa(count), \" messages in 5 minute\")\n\t\tbreak\n\t}\n}",
"func send(client *rpc.Client, from string, to string, message string) {\n\tif from == \"\" || message == \"\" {\n\t\treturn\n\t}\n\n\tvar err error\n\n\t// Send Server.Put RPC.\n\t// TODO: Use client to send an RPC.\n\n\tif err != nil {\n\t\tglog.Errorf(\"Error sending: %v\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"ok\")\n}",
"func TestCommand(t *testing.T) {\n\tvar serverConfig Config\n\tvar clientConfig Config\n\tvar server ssntpEchoServer\n\tvar client ssntpClient\n\n\tserver.t = t\n\tclient.t = t\n\tclient.cmdChannel = make(chan string)\n\tclient.typeChannel = make(chan string)\n\tserverConfig.Transport = *transport\n\tclientConfig.Transport = *transport\n\n\tgo server.ssntp.Serve(&serverConfig, &server)\n\ttime.Sleep(500 * time.Millisecond)\n\terr := client.ssntp.Dial(&clientConfig, &client)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect\")\n\t}\n\n\tclient.payload = []byte{'Y', 'A', 'M', 'L'}\n\tclient.ssntp.SendCommand(START, client.payload)\n\n\tdefer func() {\n\t\tclient.ssntp.Close()\n\t\tserver.ssntp.Stop()\n\t}()\n\n\tselect {\n\tcase frameType := <-client.typeChannel:\n\t\tif frameType != COMMAND.String() {\n\t\t\tt.Fatalf(\"Did not receive the right frame type\")\n\t\t}\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"Did not receive the command notification\")\n\t}\n\n\tselect {\n\tcase check := <-client.cmdChannel:\n\t\tif check != START.String() {\n\t\t\tt.Fatalf(\"Did not receive the right payload\")\n\t\t}\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"Did not receive the command notification\")\n\t}\n}",
"func TestSynchronousChat(t *testing.T) {\r\n\r\n\talice := NewChatter()\r\n\tbob := NewChatter()\r\n\tSkipOnError(t, DoHandshake(t, alice, bob))\r\n\r\n\tif VERBOSE {\r\n\t\tfmt.Println(\"\\n-------------------------------\")\r\n\t\tfmt.Println(\"Starting short synchronous test sequence\")\r\n\t\tfmt.Printf(\"-------------------------------\\n\\n\")\r\n\t}\r\n\r\n\tFailOnError(t, CheckSendReceive(t, alice, bob, \"Hello there!\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"General Kenobi, you are a bold one\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"I find your behavior bewildering...\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"Surely you realize you're doomed\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"Kill him!\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"Back away. I will deal with this Jedi slime myself. \"))\r\n\tFailOnError(t, CheckSendReceive(t, alice, bob, \"Your move\"))\r\n\tFailOnError(t, CheckSendReceive(t, alice, bob, \"...\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \" You fool. I have been trained in your Jedi arts by Count Dooku himself.\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"Attack, Kenobi.\"))\r\n\tFailOnError(t, CheckSendReceive(t, alice, bob, \"You forget I trained the Jedi that defeated Count Dooku!\"))\r\n\tFailOnError(t, CheckSendReceive(t, alice, bob, \"I may not defeat your droids, but my troops certainly will.\"))\r\n\tFailOnError(t, CheckSendReceive(t, bob, alice, \"Army or not, you must realize you are doomed.\"))\r\n\tFailOnError(t, CheckSendReceive(t, alice, bob, \"I don't think so.\"))\r\n}",
"func TestReceiver(t *testing.T) {\n\t// Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t// Prepare the messages we're going to receive.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":[email protected] QUIT :Gone to have lunch\"),\n\t}\n\n\t// Start the goroutine.\n\tgo Receiver(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\t// Send the message in.\n\t\tconn.SetBuffer <- msg\n\n\t\t// Receieve the message back out.\n\t\trecvMsg := <-dataChan\n\t\trecvMsg = bytes.TrimRight(recvMsg, \"\\x00\")\n\n\t\t// If they aren't the same, something horrible happened.\n\t\tif cmp := bytes.Compare(recvMsg, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to read correctly: expected %v, got %v\",\n\t\t\t\tmsg,\n\t\t\t\trecvMsg)\n\t\t}\n\t}\n\n\t// Close the channel and confirm the tests don't panic.\n\tclose(dataChan)\n\tconn.SetBuffer <- []byte(\"Last test\")\n\ttime.Sleep(100 * time.Millisecond)\n}",
"func TestRetransmission(t *testing.T) {\n\tt.Skip(\"To test: OnMessage not implemented yet\")\n\n\tassert := assert.New(t)\n\tnetwork := newMockNetwork()\n\n\tsrv1 := setupNodeOnMockNetworkExt(t, network.createConnection(), \"\", func(config *Config) {\n\t\tconfig.RetryCount = 5\n\t})\n\tsrv2 := setupNodeOnMockNetwork(t, network.createConnection(), srv1.LocalAddr().String())\n\n\tlog.Info(\"Network prepared\")\n\tc := 0\n\tvar cLock sync.Mutex\n\tsrv2.onMessage(func(message *mcc.Message) bool {\n\t\tswitch message.Payload.(type) {\n\t\tcase *pb.Message_Ping:\n\t\t\tcLock.Lock()\n\t\t\tdefer cLock.Unlock()\n\t\t\tc++\n\t\t\treturn c > 2\n\t\tdefault:\n\t\t\treturn true\n\t\t}\n\t})\n\n\tinfo := &Info{\n\t\tcert: srv2.cert,\n\t}\n\terr := srv1.ping(info)\n\tassert.Nil(err)\n\tassert.Equal(3, c)\n}",
"func sendMsgToClient(clientAdd string, clientMSG string) bool {\r\n\tfor i := 0; i < len(clients); i++ {\r\n\t\tif clients[i].c.RemoteAddr().String() == clientAdd{\r\n\t\t\tsendClient := \"ACK \" + clientMSG + \"\\n\"\r\n\t\t\tclients[i].c.Write([]byte(string(sendClient)))\r\n\t\t\treturn true\r\n\t\t}\r\n\t}\r\n\treturn false\r\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test sending large random messages between server and client | func TestLargeSend(t *testing.T) {
	// Create server
	server, serverEvent := NewServer[[]byte, []byte]()
	assert(!server.Serving(), t, "Server should not be serving")

	// Start server
	err := server.Start("127.0.0.1", 0)
	assertNoErr(err, t)
	assert(server.Serving(), t, "Server should be serving")
	time.Sleep(waitTime)

	// Check server address info
	host, port, err := server.GetAddr()
	assertNoErr(err, t)
	assert(server.sock.Addr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
	fmt.Printf("Server address: %s:%d\n", host, port)

	// Create client
	client, clientEvent := NewClient[[]byte, []byte]()
	assert(!client.Connected(), t, "Client should not be connected")

	// Connect to server
	err = client.Connect(host, port)
	assertNoErr(err, t)
	assert(client.Connected(), t, "Client should be connected")
	time.Sleep(waitTime)

	// Check client address info
	host, port, err = client.GetAddr()
	assertNoErr(err, t)
	assert(client.sock.LocalAddr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
	fmt.Printf("Client address: %s:%d\n", host, port)

	// Check connect event was received
	clientConnectEvent := <-serverEvent
	assertEq(clientConnectEvent, ServerEvent[[]byte]{
		EventType: ServerConnect,
		ClientID: 0,
	}, t)

	// Generate large messages
	largeMessageFromServerLength := rand.Int() % 65536
	largeMessageFromServer := make([]byte, largeMessageFromServerLength)
	n, err := rand.Read(largeMessageFromServer)
	assertNoErr(err, t)
	assertEq(n, largeMessageFromServerLength, t)
	fmt.Printf("Generated large message from server (%d bytes)\n", largeMessageFromServerLength)
	largeMessageFromClientLength := rand.Int() % 32768
	largeMessageFromClient := make([]byte, largeMessageFromClientLength)
	n, err = rand.Read(largeMessageFromClient)
	assertNoErr(err, t)
	assertEq(n, largeMessageFromClientLength, t)
	fmt.Printf("Generated large message from client (%d bytes)\n", largeMessageFromClientLength)

	// Send large message to client
	err = server.Send(largeMessageFromServer)
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Receive large message from server
	dataFromServer := <-clientEvent
	assertEq(dataFromServer, ClientEvent[[]byte]{
		EventType: ClientReceive,
		Data: largeMessageFromServer,
	}, t)

	// Send large message to server
	err = client.Send(largeMessageFromClient)
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Receive large message from client
	dataFromClient := <-serverEvent
	assertEq(dataFromClient, ServerEvent[[]byte]{
		EventType: ServerReceive,
		ClientID: 0,
		Data: largeMessageFromClient,
	}, t)

	// Disconnect from server
	err = client.Disconnect()
	assertNoErr(err, t)
	time.Sleep(waitTime)

	// Check disconnect event was received
	clientDisconnectEvent := <-serverEvent
	assertEq(clientDisconnectEvent, ServerEvent[[]byte]{
		EventType: ServerDisconnect,
		ClientID: 0,
	}, t)

	// Stop server
	err = server.Stop()
	assertNoErr(err, t)
	assert(!server.Serving(), t, "Server should not be serving")
	time.Sleep(waitTime)
} | [
"func TestSendingNumerousMessages(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[int, int]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[int, int]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[int]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Generate messages\n\tnumServerMessages := (rand.Int() % 64) + 64\n\tnumClientMessages := (rand.Int() % 128) + 128\n\tserverMessages := make([]int, numServerMessages)\n\tclientMessages := make([]int, numClientMessages)\n\tfor i := 0; i < numServerMessages; i++ {\n\t\tserverMessages = append(serverMessages, rand.Int()%1024)\n\t}\n\tfor i := 0; i < numClientMessages; i++ {\n\t\tclientMessages = append(clientMessages, rand.Int()%1024)\n\t}\n\tfmt.Printf(\"Generated %d server messages\\n\", numServerMessages)\n\tfmt.Printf(\"Generated %d client messages\\n\", numClientMessages)\n\n\t// Send messages\n\tfor _, serverMessage := range serverMessages {\n\t\terr := client.Send(serverMessage)\n\t\tassertNoErr(err, t)\n\t}\n\tfor _, clientMessage := range clientMessages {\n\t\terr := server.Send(clientMessage)\n\t\tassertNoErr(err, t)\n\t}\n\ttime.Sleep(waitTime)\n\n\t// Receive messages from client\n\tfor _, serverMessage := range serverMessages {\n\t\tserverReceiveEvent := <-serverEvent\n\t\tassertEq(serverReceiveEvent, ServerEvent[int]{\n\t\t\tEventType: ServerReceive,\n\t\t\tClientID: 0,\n\t\t\tData: serverMessage,\n\t\t}, t)\n\t}\n\n\t// Receive messages from server\n\tfor _, clientMessage := range clientMessages {\n\t\tclientReceiveEvent := <-clientEvent\n\t\tassertEq(clientReceiveEvent, ClientEvent[int]{\n\t\t\tEventType: ClientReceive,\n\t\t\tData: clientMessage,\n\t\t}, t)\n\t}\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[int]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func _TestHugeMessages(t *testing.T) {\n\tfmt.Println(\"Starting large message send test. Be patient, the test may run for several minutes\")\n\tfmt.Println(\"TestHugeMessages sends 1k messages each of 10^7 bytes in length\")\n\tconf := Config{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\"}\n\n\t// launch proxy server\n\tgo acceptClusterMember(9999)\n\tgo sendClusterMembers(9009)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsender, err := NewWithConfig(1, \"127.0.0.1\", 5021, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\treceiver, err := NewWithConfig(2, \"127.0.0.1\", 5022, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\terrorChannel := make(chan error, 1)\n\tcount := 1000\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i < messageSize; i += 1 {\n\t\tbuf.WriteString(\"1\")\n\t}\n\tdone := make(chan bool, 1)\n\tgo sendHugeMessages(sender.Outbox(), errorChannel, buf.String(), count, 2, 1)\n\tgo recvHugeMessages(receiver.Inbox(), errorChannel, count, done)\n\tselect {\n\tcase <-done:\n\t\tfmt.Println(\"TestOneWaySend passed successfully\")\n\t\tbreak\n\tcase err := <-errorChannel:\n\t\tt.Errorf(\"Error in sending large message.\\n\" + err.Error())\n\tcase <-time.After(5 * time.Minute):\n\t\tt.Errorf(\"Could not send \", strconv.Itoa(count), \" messages in 5 minute\")\n\t\tbreak\n\t}\n}",
"func TestStress(t *testing.T) {\n\trunServerClient(t, func(c *lisafs.Client) {\n\t\tconcurrency := 8\n\t\tnumMsgPerGoroutine := 5000\n\t\tvar clientWg sync.WaitGroup\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tclientWg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer clientWg.Done()\n\n\t\t\t\tfor j := 0; j < numMsgPerGoroutine; j++ {\n\t\t\t\t\t// Create a massive random message.\n\t\t\t\t\tvar req lisafs.MsgDynamic\n\t\t\t\t\treq.Randomize(100)\n\n\t\t\t\t\tvar resp lisafs.MsgDynamic\n\t\t\t\t\tif err := c.SndRcvMessage(dynamicMsgID, uint32(req.SizeBytes()), req.MarshalBytes, resp.CheckedUnmarshal, nil, req.String, resp.String); err != nil {\n\t\t\t\t\t\tt.Errorf(\"SndRcvMessage: received unexpected error %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif !reflect.DeepEqual(&req, &resp) {\n\t\t\t\t\t\tt.Errorf(\"response should be the same as request: request = %+v, response = %+v\", req, resp)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\tclientWg.Wait()\n\t})\n}",
"func TestOneWaySend(t *testing.T) {\n\tfmt.Println(\"Starting one way send test. Be patient, the test may run for several minutes\")\n\tfmt.Println(\"One way send test case launches two servers and sends 10k messages from one \")\n\tfmt.Println(\"server to the other. Test checks that exactly one copy of every message is received\")\n\tconf := Config{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\"}\n\n\t// launch proxy server\n\tgo acceptClusterMember(9999)\n\tgo sendClusterMembers(9009)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsender, err := NewWithConfig(1, \"127.0.0.1\", 5011, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\treceiver, err := NewWithConfig(2, \"127.0.0.1\", 5012, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\tdone := make(chan bool, 1)\n\tcount := 10000\n\n\trecord := make([]uint32, count)\n\n\tgo receive(receiver.Inbox(), record, count, 2, done)\n\tgo sendMessages(sender.Outbox(), count, 2, 1)\n\tselect {\n\tcase <-done:\n\t\tfmt.Println(\"TestOneWaySend passed successfully\")\n\t\tbreak\n\tcase <-time.After(5 * time.Minute):\n\t\tt.Errorf(\"Could not send \", strconv.Itoa(count), \" messages in 5 minute\")\n\t\tbreak\n\t}\n}",
"func TestSender(t *testing.T) {\n\t// Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t// Prepare the messages we're going to send.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":[email protected] QUIT :Gone to have lunch\"),\n\t}\n\n\t// Start the goroutine.\n\tgo Sender(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\tdataChan <- msg\n\t\tconn.SetBuffer <- msg\n\n\t\t// Sleep to give the other goroutine some time to make the copy. This slows the tests,\n\t\t// but we just have to accept that.\n\t\ttime.Sleep(200)\n\n\t\tif cmp := bytes.Compare(conn.Buffer, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to write correctly: expected %v, got %v.\",\n\t\t\t\tmsg,\n\t\t\t\tconn.Buffer)\n\t\t}\n\t}\n\n\t// Close the channel.\n\tclose(dataChan)\n}",
"func Test2ClientChatUsingMultipleClients(t *testing.T) {\n\n\t//code to create random string for the datapoint\n\tconst charset = \"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\tconst numclients = 100\n\tconst datapoints = 2\n\n\tvar seededRand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\n\tstringmaker := func(length int, charset string) string {\n\t\tb := make([]byte, length)\n\t\tfor i := range b {\n\t\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t\t}\n\t\treturn string(b)\n\t}\n\tgetstring := func(length int) string {\n\t\treturn stringmaker(length, charset)\n\t}\n\n\tstart := time.Now()\n\tdata := make([]string, datapoints)\n\tdone := make(chan bool, numclients)\n\t//start all the clients and pass the two msgs to each of them to broadcast\n\tfor i := 0; i < numclients; i++ {\n\t\tfor j := 0; j < datapoints; j++ {\n\t\t\tdata[j] = getstring(10)\n\t\t}\n\t\tgo startClient(data, done)\n\t}\n\t//wait for all the clients to finish\n\tcount := 0\n\tfor i := 1; i <= numclients; i++ {\n\t\t<-done\n\t\tcount = count + 1\n\t}\n\tassertEquals(t, count, numclients)\n\telapsed := time.Since(start)\n\t//measure the elapsed time\n\tfmt.Println(elapsed)\n}",
"func TestLotsOfDataManyStreams(t *testing.T) {\n\t// Skip on windows because of https://github.com/libp2p/go-libp2p/issues/2341\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"Skipping on windows because of https://github.com/libp2p/go-libp2p/issues/2341\")\n\t}\n\n\t// 64k buffer\n\tconst bufSize = 64 << 10\n\tsendBuf := [bufSize]byte{}\n\tconst totalStreams = 512\n\tconst parallel = 8\n\t// Total sends are > 20MiB\n\trequire.Greater(t, len(sendBuf)*totalStreams, 20<<20)\n\tt.Log(\"Total sends:\", len(sendBuf)*totalStreams)\n\n\t// Fill with random bytes\n\t_, err := rand.Read(sendBuf[:])\n\trequire.NoError(t, err)\n\n\tfor _, tc := range transportsToTest {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\th1 := tc.HostGenerator(t, TransportTestCaseOpts{})\n\t\t\th2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})\n\t\t\tdefer h1.Close()\n\t\t\tdefer h2.Close()\n\t\t\tstart := time.Now()\n\t\t\tdefer func() {\n\t\t\t\tt.Log(\"Total time:\", time.Since(start))\n\t\t\t}()\n\n\t\t\trequire.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{\n\t\t\t\tID: h1.ID(),\n\t\t\t\tAddrs: h1.Addrs(),\n\t\t\t}))\n\n\t\t\th1.SetStreamHandler(\"/big-ping\", func(s network.Stream) {\n\t\t\t\tio.Copy(s, s)\n\t\t\t\ts.Close()\n\t\t\t})\n\n\t\t\tsem := make(chan struct{}, parallel)\n\t\t\tvar wg sync.WaitGroup\n\t\t\tfor i := 0; i < totalStreams; i++ {\n\t\t\t\twg.Add(1)\n\t\t\t\tsem <- struct{}{}\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\trecvBuf := [bufSize]byte{}\n\t\t\t\t\tdefer func() { <-sem }()\n\n\t\t\t\t\ts, err := h2.NewStream(context.Background(), h1.ID(), \"/big-ping\")\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tdefer s.Close()\n\n\t\t\t\t\t_, err = s.Write(sendBuf[:])\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\ts.CloseWrite()\n\n\t\t\t\t\t_, err = io.ReadFull(s, recvBuf[:])\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\trequire.Equal(t, sendBuf, recvBuf)\n\n\t\t\t\t\t_, err = s.Read([]byte{0})\n\t\t\t\t\trequire.ErrorIs(t, err, io.EOF)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\twg.Wait()\n\t\t})\n\t}\n}",
"func TestSend(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[int, string]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[string, int]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[string]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Send message to client\n\tmessageFromServer := 29275\n\terr = server.Send(messageFromServer)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from server\n\tclientReceiveEvent1 := <-clientEvent\n\tassertEq(clientReceiveEvent1, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: messageFromServer,\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Send message to server\n\tmessageFromClient := \"Hello, server!\"\n\terr = client.Send(messageFromClient)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from client\n\tserverReceiveEvent := <-serverEvent\n\tassertEq(serverReceiveEvent, ServerEvent[string]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 0,\n\t\tData: messageFromClient,\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Send response to client\n\terr = server.Send(len(serverReceiveEvent.Data))\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive response from server\n\tclientReceiveEvent2 := <-clientEvent\n\tassertEq(clientReceiveEvent2, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: len(messageFromClient),\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[string]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func main() {\np, _ := rocketmq.NewProducer(\nproducer.WithNameServer([]string{\"127.0.0.1:9876\"}),\nproducer.WithRetry(2),\n)\nerr := p.Start()\nif err != nil {\nfmt.Printf(\"start producer error: %s\", err.Error())\nos.Exit(1)\n}\nfor i := 0; i < 1000; i++ {\nres, err := p.SendSync(context.Background(), primitive.NewMessage(\"test\",\n[]byte(\"Hello RocketMQ Go Client!\")))\n\nif err != nil {\nfmt.Printf(\"send message error: %s\\n\", err)\n} else {\nfmt.Printf(\"send message success: result=%s\\n\", res.String())\n}\n}\nerr = p.Shutdown()\nif err != nil {\nfmt.Printf(\"shutdown producer error: %s\", err.Error())\n}\n}",
"func TestSynchronousChatExtended(t *testing.T) {\r\n\t/*\r\n\t\tif testing.Short() {\r\n\t\t\tt.Skip(\"Skipping extended text in short mode.\")\r\n\t\t}\r\n\r\n\t\tchatters, err := SetupChatters(t, EXTENDED_TEST_PARTICIPANTS)\r\n\t\tSkipOnError(t, err)\r\n\r\n\t\tif VERBOSE {\r\n\t\t\tfmt.Println(\"\\n-------------------------------\")\r\n\t\t\tfmt.Printf(\"Starting extended synchronous testing, %d participants, %d rounds\\n\",\r\n\t\t\t\tEXTENDED_TEST_PARTICIPANTS,\r\n\t\t\t\tEXTENDED_TEST_ROUNDS)\r\n\t\t\tfmt.Printf(\"-------------------------------\\n\\n\")\r\n\t\t}\r\n\r\n\t\tfor i := 0; i < EXTENDED_TEST_ROUNDS; i++ {\r\n\r\n\t\t\tc1 := chatters[rand.Int()%len(chatters)]\r\n\t\t\tc2 := chatters[rand.Int()%len(chatters)]\r\n\t\t\tif c1 == c2 {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tm := fmt.Sprintf(\"M%d\", i)\r\n\t\t\tif VERBOSE {\r\n\t\t\t\tfmt.Printf(\"Message \\\"%s\\\" to be delivered from %s to %s\\n\",\r\n\t\t\t\t\tm,\r\n\t\t\t\t\tPrintHandle(&c1.Identity.PublicKey),\r\n\t\t\t\t\tPrintHandle(&c2.Identity.PublicKey))\r\n\t\t\t}\r\n\r\n\t\t\tFailOnError(t, CheckSendReceive(t, c1, c2, m))\r\n\t\t}\r\n\r\n\t*/\r\n}",
"func TestMuxSendReceiveParallel(t *testing.T) {\n\tclient, server := createTestingMuxs()\n\tdefer client.Close()\n\tdefer server.Close()\n\tdata := fastrand.Bytes(int(client.settings.MaxFrameSize()) * 5)\n\n\tstart := make(chan struct{})\n\t// Server thread.\n\tserverWorker := func() {\n\t\t<-start\n\t\t// Wait for a stream.\n\t\tstream, err := server.AcceptStream()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\t// Read some data.\n\t\treceivedData := make([]byte, len(data))\n\t\tif _, err := io.ReadFull(stream, receivedData); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\t// The data should match.\n\t\tif !bytes.Equal(receivedData, data) {\n\t\t\tt.Error(\"server: received data didn't match\")\n\t\t\treturn\n\t\t}\n\t\t// Send the data back.\n\t\twritten, err := stream.Write(receivedData)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif written < len(receivedData) {\n\t\t\tt.Errorf(\"server: not enough data written: %v < %v\", written, len(receivedData))\n\t\t\treturn\n\t\t}\n\t\t// Wait a bit before closing the stream to give the peer some time to\n\t\t// read the data.\n\t\ttime.Sleep(time.Second)\n\t\t// Close the stream.\n\t\tif err := stream.Close(); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\t// Client thread.\n\tclientWorker := func() {\n\t\t<-start\n\t\t// Create a new stream.\n\t\tstream, err := client.NewStream()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\t// Write some data.\n\t\twritten, err := stream.Write(data)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif written < len(data) {\n\t\t\tt.Errorf(\"client: not enough data written: %v < %v\", written, len(data))\n\t\t\treturn\n\t\t}\n\t\t// Read some data.\n\t\treceivedData := make([]byte, len(data))\n\t\tif _, err := io.ReadFull(stream, receivedData); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\t// The data should match.\n\t\tif !bytes.Equal(receivedData, data) {\n\t\t\tt.Error(\"client: received data didn't match\")\n\t\t\treturn\n\t\t}\n\t\t// Close the stream.\n\t\tif err := stream.Close(); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\t// Spin up the thread pairs.\n\tvar wg sync.WaitGroup\n\tnumThreadPairs := runtime.NumCPU() * 10\n\tfor i := 0; i < numThreadPairs; i++ {\n\t\twg.Add(2)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tclientWorker()\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tserverWorker()\n\t\t}()\n\t}\n\t// Wait for client and server threads to be done.\n\tclose(start)\n\twg.Wait()\n}",
"func Test1ServerBroadcastLatency(t *testing.T) {\n\n\tstart := time.Now()\n\tdata := make([]string, 2)\n\tdata[0] = \"[bye]\"//single datapoint that will be used\n\tdone := make(chan bool, 2)\n\t//start both the clients\n\tgo startClient(data, done)\n\tgo startClient(nil, done)\n\tcount := 0\n\t//wait for the clients to exit\n\tfor i := 1; i <= 2; i++ {\n\t\t<-done\n\t\tcount = count + 1\n\t}\n\tassertEquals(t, count, 2)\n\telapsed := time.Since(start)\n\t//elapsed time depicts the server broadcast latency\n\tfmt.Println(elapsed)\n\n}",
"func TestResendTimeout(t *testing.T) {\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\tdefer cancelFunc()\n\n\tconst (\n\t\tvdisk = \"12345\"\n\t\tfirstSequence = 0\n\t\tnumLogs = 500\n\t)\n\n\tclean, configSource, _ := newZeroStorDefaultConfig(t, vdisk)\n\tdefer clean()\n\t// only used in client.connect\n\t// TODO : remove the need to this unused server\n\tunusedServer, err := server.NewServer(testConf, configSource)\n\tassert.Nil(t, err)\n\tgo unusedServer.Listen(ctx)\n\n\t// list of sequences for the server to ignores.\n\t// it simulates timeout\n\tlogsToIgnore := map[uint64]struct{}{}\n\tfor i := 0; i < numLogs; i++ {\n\t\tif i%5 == 0 {\n\t\t\tlogsToIgnore[uint64(i)] = struct{}{}\n\t\t}\n\t}\n\n\tds := newDummyServer(unusedServer)\n\tgo ds.run(t, logsToIgnore)\n\n\tclient, err := newClient([]string{unusedServer.ListenAddr()}, vdisk)\n\tassert.Nil(t, err)\n\tdefer client.Close()\n\n\tdata := make([]byte, 4096)\n\n\tclient.bw = ds.reqPipeWriter // fake client writer\n\tclient.rd = ds.respPipeReader // fake the reader\n\n\tgo client.run(client.ctx)\n\n\tvar wg sync.WaitGroup\n\n\t// start receiver goroutine\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\twaitForBlockReceivedResponse(t, client, 0, numLogs-1)\n\t}()\n\n\t// send the logs\n\tfor i := 0; i < numLogs; i++ {\n\t\tx := uint64(i)\n\n\t\t// send\n\t\terr := client.Send(schema.OpSet, x, int64(x), int64(x), data)\n\t\tassert.Nil(t, err)\n\t}\n\n\twg.Wait()\n}",
"func testMsgDuplicator(peerCount, maxMsg, limit int, p float32, seed int64, exp int, t *testing.T) {\n\tpeers := testMsgDuplicatorEnv(peerCount, maxMsg, limit, p, seed)\n\n\t// Peers can talk to each other.\n\tfor _, from := range peers {\n\t\tfor _, to := range peers {\n\t\t\tif from.Addr() == to.Addr() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfrom.Send(&BaseMsg{From: from.Addr(), To: to.Addr()})\n\t\t}\n\t}\n\n\t// Verify sent message count.\n\ts := collectMsgDuplicatorStats(peers)\n\tif exp != s.MsgSent {\n\t\tt.Errorf(\"expected %d and got %d\", exp, s.MsgSent)\n\t}\n}",
"func TestSynchronousChatVector(t *testing.T) {\r\n\r\n\talice := NewChatter()\r\n\tbob := NewChatter()\r\n\tSkipOnError(t, DoHandshake(t, alice, bob))\r\n\r\n\tif VERBOSE {\r\n\t\tfmt.Println(\"\\n-------------------------------\")\r\n\t\tfmt.Println(\"Starting synchronous test vector sequence\")\r\n\t\tfmt.Printf(\"-------------------------------\\n\\n\")\r\n\t}\r\n\r\n\tSetFixedRandomness(true)\r\n\tdefer SetFixedRandomness(false)\r\n\talice = NewChatter()\r\n\tbob = NewChatter()\r\n\r\n\tFailOnError(t, DoHandshake(t, alice, bob))\r\n\r\n\t//Check first message\r\n\tmessage, err := CheckSend(t, bob, alice, \"Alice?\")\r\n\tSkipOnError(t, err)\r\n\r\n\tif message.Sender == nil {\r\n\t\tt.Fatal(\"message.Sender not set\")\r\n\t}\r\n\tCheckTestVector(t, message.Sender.Fingerprint(), \"83F257B18A903848BA6CDB628E7D925B\", \"Sender\")\r\n\tif message.Receiver == nil {\r\n\t\tt.Fatal(\"message.Receiver not set\")\r\n\t}\r\n\tCheckTestVector(t, message.Receiver.Fingerprint(), \"7446CB2BE09E4967E72B861EB81BC5AF\", \"Receiver\")\r\n\tif message.NextDHRatchet == nil {\r\n\t\tt.Fatal(\"message.NextDHRatchet not set\")\r\n\t}\r\n\tCheckTestVector(t, message.NextDHRatchet.Fingerprint(), \"CE0753ABB34AFC0EDC95B3BF72924E20\", \"NextDHRatchet\")\r\n\tCheckTestVector(t, []byte{byte(message.Counter)}, \"01\", \"Counter\")\r\n\tCheckTestVector(t, []byte{byte(message.LastUpdate)}, \"01\", \"LastUpdate\")\r\n\tCheckTestVector(t, message.IV, \"0102030405060708090A0B0C\", \"IV\")\r\n\tCheckTestVector(t, message.Ciphertext, \"A0B5D420923494FFFBCB38CD7BE8E55B37DAF7912AB6\", \"Ciphertext\")\r\n\r\n\tSkipOnError(t, CheckReceive(t, alice, message, \"Alice?\"))\r\n\r\n\t//Check second message\r\n\tmessage, err = CheckSend(t, alice, bob, \"Bob...\")\r\n\tSkipOnError(t, err)\r\n\r\n\tif message.Sender == nil {\r\n\t\tt.Fatal(\"message.Sender not set\")\r\n\t}\r\n\tCheckTestVector(t, message.Sender.Fingerprint(), \"7446CB2BE09E4967E72B861EB81BC5AF\", \"Sender\")\r\n\tif message.Receiver == nil {\r\n\t\tt.Fatal(\"message.Receiver not set\")\r\n\t}\r\n\tCheckTestVector(t, message.Receiver.Fingerprint(), \"83F257B18A903848BA6CDB628E7D925B\", \"Receiver\")\r\n\tif message.NextDHRatchet == nil {\r\n\t\tt.Fatal(\"message.NextDHRatchet not set\")\r\n\t}\r\n\tCheckTestVector(t, message.NextDHRatchet.Fingerprint(), \"32F5CB5763B7D3875A3695625FB4F847\", \"NextDHRatchet\")\r\n\tCheckTestVector(t, []byte{byte(message.Counter)}, \"01\", \"Counter\")\r\n\tCheckTestVector(t, []byte{byte(message.LastUpdate)}, \"01\", \"LastUpdate\")\r\n\tCheckTestVector(t, message.IV, \"0102030405060708090A0B0C\", \"IV\")\r\n\tCheckTestVector(t, message.Ciphertext, \"6C0D932DC852E34F92B239976FE9759FBB82B041FAE6\", \"Ciphertext\")\r\n\r\n\tSkipOnError(t, CheckReceive(t, bob, message, \"Bob...\"))\r\n\r\n\t//Longer sequence, unchecked\r\n\tSkipOnError(t, CheckSendReceive(t, bob, alice, \"Alice!!\"))\r\n\tSkipOnError(t, CheckSendReceive(t, bob, alice, \"Alice!!!\"))\r\n\tSkipOnError(t, CheckSendReceive(t, bob, alice, \"Alice!!!\"))\r\n\tSkipOnError(t, CheckSendReceive(t, alice, bob, \"Bob!\"))\r\n\tSkipOnError(t, CheckSendReceive(t, alice, bob, \"I heard you the first time\"))\r\n\tSkipOnError(t, CheckSendReceive(t, alice, bob, \"No need to repeat yourself...\"))\r\n\tSkipOnError(t, CheckSendReceive(t, bob, alice, \"Sorry Alice\"))\r\n\tSkipOnError(t, CheckSendReceive(t, bob, alice, \"I got carried away\"))\r\n\tSkipOnError(t, CheckSendReceive(t, bob, alice, \"won't happen again\"))\r\n\tSkipOnError(t, CheckSendReceive(t, alice, bob, \"that's okay 
Bob\"))\r\n\tmessage, err = CheckSend(t, alice, bob, \"it happens!\")\r\n\tSkipOnError(t, err)\r\n\r\n\t// Check final message after extended conversation\r\n\tif message.Sender == nil {\r\n\t\tt.Fatal(\"message.Sender not set\")\r\n\t}\r\n\tCheckTestVector(t, message.Sender.Fingerprint(), \"7446CB2BE09E4967E72B861EB81BC5AF\", \"Sender\")\r\n\tif message.Receiver == nil {\r\n\t\tt.Fatal(\"message.Receiver not set\")\r\n\t}\r\n\tCheckTestVector(t, message.Receiver.Fingerprint(), \"83F257B18A903848BA6CDB628E7D925B\", \"Receiver\")\r\n\tif message.NextDHRatchet == nil {\r\n\t\tt.Fatal(\"message.NextDHRatchet not set\")\r\n\t}\r\n\tCheckTestVector(t, message.NextDHRatchet.Fingerprint(), \"9194DE8B23D5A10C5D5EC9F8CB8D7AAC\", \"NextDHRatchet\")\r\n\tCheckTestVector(t, []byte{byte(message.Counter)}, \"06\", \"Counter\")\r\n\tCheckTestVector(t, []byte{byte(message.LastUpdate)}, \"05\", \"LastUpdate\")\r\n\tCheckTestVector(t, message.IV, \"0102030405060708090A0B0C\", \"IV\")\r\n\tCheckTestVector(t, message.Ciphertext, \"A3BC2406B31F0FA9AA36BB33D3D43F0BE614D5A18C91B2D6D165E3\", \"Ciphertext\")\r\n}",
"func gatewaySendRequestsBench(b *testing.B, singleReplySub bool) {\n\tserver.SetGatewaysSolicitDelay(10 * time.Millisecond)\n\tdefer server.ResetGatewaysSolicitDelay()\n\n\tob := testDefaultBenchOptionsForGateway(\"B\")\n\tsb := RunServer(ob)\n\tdefer sb.Shutdown()\n\n\tgwbURL, err := url.Parse(fmt.Sprintf(\"nats://%s:%d\", ob.Gateway.Host, ob.Gateway.Port))\n\tif err != nil {\n\t\tb.Fatalf(\"Error parsing url: %v\", err)\n\t}\n\toa := testDefaultBenchOptionsForGateway(\"A\")\n\toa.Gateway.Gateways = []*server.RemoteGatewayOpts{\n\t\t{\n\t\t\tName: \"B\",\n\t\t\tURLs: []*url.URL{gwbURL},\n\t\t},\n\t}\n\tsa := RunServer(oa)\n\tdefer sa.Shutdown()\n\n\tsub := createClientConn(b, ob.Host, ob.Port)\n\tdefer sub.Close()\n\tdoDefaultConnect(b, sub)\n\tsendProto(b, sub, \"SUB foo 1\\r\\n\")\n\tflushConnection(b, sub)\n\n\tlenMsg := len(\"MSG foo reply.xxxxxxxxxx 1 2\\r\\nok\\r\\n\")\n\texpected := b.N * lenMsg\n\tch := make(chan bool, 1)\n\tgo drainConnection(b, sub, ch, expected)\n\n\tc := createClientConn(b, oa.Host, oa.Port)\n\tdefer c.Close()\n\tdoDefaultConnect(b, c)\n\tflushConnection(b, c)\n\n\t// From pub to server in cluster A:\n\tnumBytes := len(\"PUB foo reply.0123456789 2\\r\\nok\\r\\n\")\n\tif !singleReplySub {\n\t\t// Add the preceding SUB\n\t\tnumBytes += len(\"SUB reply.0123456789 0123456789\\r\\n\")\n\t\t// And UNSUB...\n\t\tnumBytes += len(\"UNSUB 0123456789\\r\\n\")\n\t}\n\t// From server in cluster A to cluster B\n\tnumBytes += len(\"RMSG $G foo reply.0123456789 2\\r\\nok\\r\\n\")\n\t// If mapping of reply...\n\tif !singleReplySub {\n\t\t// the mapping uses about 24 more bytes. So add them\n\t\t// for RMSG from server to server.\n\t\tnumBytes += 24\n\t}\n\t// From server in cluster B to sub\n\tnumBytes += lenMsg\n\tb.SetBytes(int64(numBytes))\n\n\tbw := bufio.NewWriterSize(c, defaultSendBufSize)\n\tvar subStr string\n\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tif !singleReplySub {\n\t\t\tsubStr = fmt.Sprintf(\"SUB reply.%010d %010d\\r\\n\", i+1, i+1)\n\t\t}\n\t\tbw.Write([]byte(fmt.Sprintf(\"%sPUB foo reply.%010d 2\\r\\nok\\r\\n\", subStr, i+1)))\n\t\t// Simulate that we are doing actual request/reply and therefore\n\t\t// unsub'ing the subs on the reply subject.\n\t\tif !singleReplySub && i > 1000 {\n\t\t\tbw.Write([]byte(fmt.Sprintf(\"UNSUB %010d\\r\\n\", (i - 1000))))\n\t\t}\n\t}\n\tbw.Flush()\n\tflushConnection(b, c)\n\n\t<-ch\n}",
"func TestInOrderDelivery(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\tnodeRPCContext := rpc.NewContext(nodeTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)\n\tg := gossip.New(nodeRPCContext, gossip.TestInterval, gossip.TestBootstrap)\n\n\tserver := rpc.NewServer(util.CreateTestAddr(\"tcp\"), nodeRPCContext)\n\tif err := server.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer server.Close()\n\n\tconst numMessages = 100\n\tnodeID := roachpb.NodeID(1)\n\tserverTransport, err := newRPCTransport(g, server, nodeRPCContext)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer serverTransport.Close()\n\tserverChannel := newChannelServer(numMessages, 10*time.Millisecond)\n\tif err := serverTransport.Listen(roachpb.StoreID(nodeID), serverChannel); err != nil {\n\t\tt.Fatal(err)\n\t}\n\taddr := server.Addr()\n\tif err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID),\n\t\t&roachpb.NodeDescriptor{\n\t\t\tAddress: util.MakeUnresolvedAddr(addr.Network(), addr.String()),\n\t\t},\n\t\ttime.Hour); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tclientNodeID := roachpb.NodeID(2)\n\tclientTransport, err := newRPCTransport(g, nil, nodeRPCContext)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clientTransport.Close()\n\n\tfor i := 0; i < numMessages; i++ {\n\t\treq := &multiraft.RaftMessageRequest{\n\t\t\tGroupID: 1,\n\t\t\tMessage: raftpb.Message{\n\t\t\t\tTo: uint64(nodeID),\n\t\t\t\tFrom: uint64(clientNodeID),\n\t\t\t\tCommit: uint64(i),\n\t\t\t},\n\t\t\tToReplica: roachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: nodeID,\n\t\t\t\tStoreID: roachpb.StoreID(nodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(nodeID),\n\t\t\t},\n\t\t\tFromReplica: roachpb.ReplicaDescriptor{\n\t\t\t\tNodeID: clientNodeID,\n\t\t\t\tStoreID: roachpb.StoreID(clientNodeID),\n\t\t\t\tReplicaID: roachpb.ReplicaID(clientNodeID),\n\t\t\t},\n\t\t}\n\t\tif err := clientTransport.Send(req); err != nil {\n\t\t\tt.Errorf(\"failed to send message %d: %s\", i, err)\n\t\t}\n\t}\n\n\tfor i := 0; i < numMessages; i++ {\n\t\treq := <-serverChannel.ch\n\t\tif req.Message.Commit != uint64(i) {\n\t\t\tt.Errorf(\"messages out of order: got %d while expecting %d\", req.Message.Commit, i)\n\t\t}\n\t}\n}",
"func TestClientSend(t *testing.T) {\n\tsrc := newClient(\"abc\", nil)\n\tdest := newClient(\"def\", nil)\n\n\t// The message should be queued since dest has not registered.\n\tm := \"hello\"\n\tif err := src.send(dest, m); err != nil {\n\t\tt.Errorf(\"When dest is not registered, src.send(dest, %q) got error: %s, want nil\", m, err.Error())\n\t}\n\tif len(src.msgs) != 1 || src.msgs[0] != m {\n\t\tt.Errorf(\"After src.send(dest, %q) when dest is not registered, src.msgs = %v, want [%q]\", m, src.msgs, m)\n\t}\n\n\trwc := collidertest.MockReadWriteCloser{Closed: false}\n\tdest.register(&rwc)\n\n\t// The message should be sent this time.\n\tm2 := \"hi\"\n\tsrc.send(dest, m2)\n\n\tif rwc.Msg == \"\" {\n\t\tt.Errorf(\"When dest is registered, after src.send(dest, %q), dest.rwc.Msg = %v, want %q\", m2, rwc.Msg, m2)\n\t}\n\tif len(src.msgs) != 1 || src.msgs[0] != m {\n\t\tt.Errorf(\"When dest is registered, after src.send(dest, %q), src.msgs = %v, want [%q]\", m2, src.msgs, m)\n\t}\n}",
"func TestSendsRequests(t *testing.T) {\n\texpected := 33\n\n\tticker := make(chan time.Time)\n\tcodes := make(chan []byte, 0)\n\tf := &FakeTTY{c: codes}\n\tr := &NullReporter{}\n\tu := &NullUploader{}\n\tgo loop(ticker, f, r, u)\n\n\tticker <- time.Now()\n\n\t// allow 10 seconds to receive data\n\twait := time.Tick(10 * time.Second)\n\n\tvar i = 0\nX:\n\tfor {\n\t\tselect {\n\t\tcase c := <-codes:\n\t\t\tt.Logf(\"% x\\n\", c)\n\t\t\ti++\n\t\t\tif i == expected {\n\t\t\t\tbreak X\n\t\t\t}\n\t\tcase <-wait:\n\t\t\tt.Fatalf(\"expected %d codes but received %d\\n\", expected, i)\n\t\t}\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
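The next document, TestSendingNumerousMessages, relies on helpers (assert, assertNoErr, assertEq) and a waitTime constant that are defined elsewhere in the test suite and not shown in this excerpt. Purely as an assumption to make the snippet easier to follow, a minimal sketch of such helpers could look like the following; the names, package layout, and durations here are hypothetical, not taken from the source package.

// Hypothetical helpers assumed by the tests in this dataset; not from the source package.
package netcode // assumed package name; the tests access unexported fields, so they live in the library package

import (
	"reflect"
	"testing"
	"time"
)

// waitTime gives background goroutines a moment to deliver events between steps.
const waitTime = 100 * time.Millisecond

// assert fails the test with msg when the condition does not hold.
func assert(cond bool, t *testing.T, msg string) {
	if !cond {
		t.Error(msg)
	}
}

// assertNoErr fails the test when an unexpected error is returned.
func assertNoErr(err error, t *testing.T) {
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

// assertEq fails the test when two values are not deeply equal.
func assertEq[T any](got, want T, t *testing.T) {
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
}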
Test sending numerous messages | func TestSendingNumerousMessages(t *testing.T) {
// Create server
server, serverEvent := NewServer[int, int]()
assert(!server.Serving(), t, "Server should not be serving")
// Start server
err := server.Start("127.0.0.1", 0)
assertNoErr(err, t)
assert(server.Serving(), t, "Server should be serving")
time.Sleep(waitTime)
// Check server address info
host, port, err := server.GetAddr()
assertNoErr(err, t)
assert(server.sock.Addr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Server address: %s:%d\n", host, port)
// Create client
client, clientEvent := NewClient[int, int]()
assert(!client.Connected(), t, "Client should not be connected")
// Connect to server
err = client.Connect(host, port)
assertNoErr(err, t)
assert(client.Connected(), t, "Client should be connected")
time.Sleep(waitTime)
// Check client address info
host, port, err = client.GetAddr()
assertNoErr(err, t)
assert(client.sock.LocalAddr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Client address: %s:%d\n", host, port)
// Check connect event was received
clientConnectEvent := <-serverEvent
assertEq(clientConnectEvent, ServerEvent[int]{
EventType: ServerConnect,
ClientID: 0,
}, t)
// Generate messages
numServerMessages := (rand.Int() % 64) + 64
numClientMessages := (rand.Int() % 128) + 128
	serverMessages := make([]int, 0, numServerMessages)
	clientMessages := make([]int, 0, numClientMessages)
for i := 0; i < numServerMessages; i++ {
serverMessages = append(serverMessages, rand.Int()%1024)
}
for i := 0; i < numClientMessages; i++ {
clientMessages = append(clientMessages, rand.Int()%1024)
}
fmt.Printf("Generated %d server messages\n", numServerMessages)
fmt.Printf("Generated %d client messages\n", numClientMessages)
// Send messages
for _, serverMessage := range serverMessages {
err := client.Send(serverMessage)
assertNoErr(err, t)
}
for _, clientMessage := range clientMessages {
err := server.Send(clientMessage)
assertNoErr(err, t)
}
time.Sleep(waitTime)
// Receive messages from client
for _, serverMessage := range serverMessages {
serverReceiveEvent := <-serverEvent
assertEq(serverReceiveEvent, ServerEvent[int]{
EventType: ServerReceive,
ClientID: 0,
Data: serverMessage,
}, t)
}
// Receive messages from server
for _, clientMessage := range clientMessages {
clientReceiveEvent := <-clientEvent
assertEq(clientReceiveEvent, ClientEvent[int]{
EventType: ClientReceive,
Data: clientMessage,
}, t)
}
// Disconnect from server
err = client.Disconnect()
assertNoErr(err, t)
time.Sleep(waitTime)
// Check disconnect event was received
clientDisconnectEvent := <-serverEvent
assertEq(clientDisconnectEvent, ServerEvent[int]{
EventType: ServerDisconnect,
ClientID: 0,
}, t)
// Stop server
err = server.Stop()
assertNoErr(err, t)
assert(!server.Serving(), t, "Server should not be serving")
time.Sleep(waitTime)
} | [
"func send(ch IPCChannel, messages []string, name string) {\n\n\tfor _, testMsg := range messages {\n\t\tlogger.Infof(\"%v sending messages: %v\", name, testMsg)\n\t\tch.Send(testMsg)\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n}",
"func _TestHugeMessages(t *testing.T) {\n\tfmt.Println(\"Starting large message send test. Be patient, the test may run for several minutes\")\n\tfmt.Println(\"TestHugeMessages sends 1k messages each of 10^7 bytes in length\")\n\tconf := Config{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\"}\n\n\t// launch proxy server\n\tgo acceptClusterMember(9999)\n\tgo sendClusterMembers(9009)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsender, err := NewWithConfig(1, \"127.0.0.1\", 5021, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\treceiver, err := NewWithConfig(2, \"127.0.0.1\", 5022, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\terrorChannel := make(chan error, 1)\n\tcount := 1000\n\n\tvar buf bytes.Buffer\n\tfor i := 0; i < messageSize; i += 1 {\n\t\tbuf.WriteString(\"1\")\n\t}\n\tdone := make(chan bool, 1)\n\tgo sendHugeMessages(sender.Outbox(), errorChannel, buf.String(), count, 2, 1)\n\tgo recvHugeMessages(receiver.Inbox(), errorChannel, count, done)\n\tselect {\n\tcase <-done:\n\t\tfmt.Println(\"TestOneWaySend passed successfully\")\n\t\tbreak\n\tcase err := <-errorChannel:\n\t\tt.Errorf(\"Error in sending large message.\\n\" + err.Error())\n\tcase <-time.After(5 * time.Minute):\n\t\tt.Errorf(\"Could not send \", strconv.Itoa(count), \" messages in 5 minute\")\n\t\tbreak\n\t}\n}",
"func TestSender(t *testing.T) {\n\t// Set up our variables.\n\tdataChan := make(chan []byte)\n\tconn := new(MockConn)\n\tconn.SetBuffer = make(chan []byte)\n\n\t// Prepare the messages we're going to send.\n\tmessages := [][]byte{\n\t\t[]byte(\"PASS secretpasswordhere\"),\n\t\t[]byte(\"SERVICE dict * *.fr 0 0 :French Dictionary\"),\n\t\t[]byte(\":[email protected] QUIT :Gone to have lunch\"),\n\t}\n\n\t// Start the goroutine.\n\tgo Sender(conn, dataChan)\n\n\tfor _, msg := range messages {\n\t\tdataChan <- msg\n\t\tconn.SetBuffer <- msg\n\n\t\t// Sleep to give the other goroutine some time to make the copy. This slows the tests,\n\t\t// but we just have to accept that.\n\t\ttime.Sleep(200)\n\n\t\tif cmp := bytes.Compare(conn.Buffer, msg); cmp != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"Failed to write correctly: expected %v, got %v.\",\n\t\t\t\tmsg,\n\t\t\t\tconn.Buffer)\n\t\t}\n\t}\n\n\t// Close the channel.\n\tclose(dataChan)\n}",
"func sendMessages(outbox chan *Envelope, count int, to int, from int) {\n\tfor i := 0; i < count; i += 1 {\n\t\toutbox <- &Envelope{Pid: to, Msg: strconv.Itoa(from) + \":\" + strconv.Itoa(i)}\n\t\tif i%100 == 0 {\n\t\t\t//\t\t\tfmt.Println(\"Sender sleeping \", i)\n\t\t\ttime.Sleep(delayInMillis * time.Millisecond)\n\t\t}\n\t}\n\tfmt.Println(\"Sender done sending\")\n}",
"func TestOneWaySend(t *testing.T) {\n\tfmt.Println(\"Starting one way send test. Be patient, the test may run for several minutes\")\n\tfmt.Println(\"One way send test case launches two servers and sends 10k messages from one \")\n\tfmt.Println(\"server to the other. Test checks that exactly one copy of every message is received\")\n\tconf := Config{MemberRegSocket: \"127.0.0.1:9999\", PeerSocket: \"127.0.0.1:9009\"}\n\n\t// launch proxy server\n\tgo acceptClusterMember(9999)\n\tgo sendClusterMembers(9009)\n\n\ttime.Sleep(100 * time.Millisecond)\n\n\tsender, err := NewWithConfig(1, \"127.0.0.1\", 5011, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\treceiver, err := NewWithConfig(2, \"127.0.0.1\", 5012, &conf)\n\tif err != nil {\n\t\tt.Errorf(\"Error in creating server \", err.Error())\n\t}\n\n\tdone := make(chan bool, 1)\n\tcount := 10000\n\n\trecord := make([]uint32, count)\n\n\tgo receive(receiver.Inbox(), record, count, 2, done)\n\tgo sendMessages(sender.Outbox(), count, 2, 1)\n\tselect {\n\tcase <-done:\n\t\tfmt.Println(\"TestOneWaySend passed successfully\")\n\t\tbreak\n\tcase <-time.After(5 * time.Minute):\n\t\tt.Errorf(\"Could not send \", strconv.Itoa(count), \" messages in 5 minute\")\n\t\tbreak\n\t}\n}",
"func Test_ListMessages_Success(t *testing.T) {\n\tdummy, err := ioutil.ReadFile(\"testdata/listmessages.json\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantData := map[string]string{\n\t\t\"page\": \"5\",\n\t\t\"limit\": \"3\",\n\t}\n\tserver, client := setupTestServer(200, dummy, want(t, \"/messages\", \"GET\", wantData))\n\tdefer server.Close()\n\n\tmsgs, err := client.ListMessages(5, 3)\n\tif err != nil {\n\t\tt.Fatal(\"got error:\", err)\n\t}\n\n\tif len(msgs) != 3 {\n\t\tt.Fatalf(\"len(msgs) = %d, want %d\", len(msgs), 3)\n\t}\n\n\twantFirstMessage := Message{\n\t\tID: 3423093,\n\t\tStreamID: 48914,\n\t\tText: \"\",\n\t\tAuthor: User{\n\t\t\tID: 50654,\n\t\t\tName: \"Herman Schaaf\",\n\t\t\tAvatar: \"https://sqwiggle-assets.s3.amazonaws.com/assets/api/heart.png\",\n\t\t\tType: TypeUser,\n\t\t\tSupport: false,\n\t\t},\n\t\tAttachments: []Attachment{\n\t\t\t{\n\t\t\t\tID: 206099,\n\t\t\t\tType: TypeImage,\n\t\t\t\tURL: \"https://api.sqwiggle.com/attachments/206099/view\",\n\t\t\t\tTitle: \"gophercolor.png\",\n\t\t\t\tDescription: \"\",\n\t\t\t\tImage: \"https://sqwiggle-assets.s3.amazonaws.com/assets/api/lightning.png\",\n\t\t\t\tCreatedAt: time.Date(2015, time.February, 5, 13, 23, 8, 115000000, time.UTC),\n\t\t\t\tUpdatedAt: time.Date(2015, time.February, 5, 13, 23, 11, 163000000, time.UTC),\n\t\t\t\tAnimated: false,\n\t\t\t\tStatus: \"uploaded\",\n\t\t\t\tWidth: 3861,\n\t\t\t\tHeight: 3861,\n\t\t\t},\n\t\t},\n\t\tMentions: []Mention{},\n\t\tCreatedAt: time.Date(2015, time.February, 5, 13, 23, 8, 111000000, time.UTC),\n\t\tUpdatedAt: time.Date(2015, time.February, 5, 13, 23, 8, 111000000, time.UTC),\n\t}\n\n\t// compare the first message to our expectation\n\tdiff, err := compare(msgs[0], wantFirstMessage)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to compare structs:\", err)\n\t}\n\tfor k, d := range diff {\n\t\tt.Errorf(\"%q: got %q, want %q\", k, d.a, d.b)\n\t}\n\n\twantSecondMessage := Message{\n\t\tID: 3423091,\n\t\tStreamID: 48914,\n\t\tText: \"This is a test, trin\",\n\t\tAuthor: User{\n\t\t\tID: 50654,\n\t\t\tName: \"Herman Schaaf\",\n\t\t\tAvatar: \"https://sqwiggle-assets.s3.amazonaws.com/assets/api/heart.png\",\n\t\t\tType: TypeUser,\n\t\t\tSupport: false,\n\t\t},\n\t\tAttachments: []Attachment{},\n\t\tMentions: []Mention{\n\t\t\t{\n\t\t\t\tSubjectType: TypeUser,\n\t\t\t\tSubjectID: 50665,\n\t\t\t\tText: \"trin\",\n\t\t\t\tName: \"trin\",\n\t\t\t\tIndices: []int{16, 20},\n\t\t\t\tMessageID: 3423091,\n\t\t\t\tID: 50665,\n\t\t\t},\n\t\t},\n\t\tCreatedAt: time.Date(2015, time.February, 5, 13, 22, 59, 830000000, time.UTC),\n\t\tUpdatedAt: time.Date(2015, time.February, 5, 13, 22, 59, 830000000, time.UTC),\n\t}\n\n\t// compare the second message to our expectation\n\tdiff, err = compare(msgs[1], wantSecondMessage)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to compare structs:\", err)\n\t}\n\tfor k, d := range diff {\n\t\tt.Errorf(\"%q: got %q, want %q\", k, d.a, d.b)\n\t}\n\n}",
"func Test_GetMessages_Success(t *testing.T) {\n\tdummy, err := ioutil.ReadFile(\"testdata/getmessage.json\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// set up server to return 200 and message list response with three messages\n\tserver, client := setupTestServer(200, dummy, want(t, \"/messages/3423093\", \"GET\", nil))\n\tdefer server.Close()\n\n\tm, err := client.GetMessage(3423093)\n\tif err != nil {\n\t\tt.Fatal(\"got error:\", err)\n\t}\n\n\twant := Message{\n\t\tID: 3423093,\n\t\tStreamID: 48914,\n\t\tText: \"\",\n\t\tAuthor: User{\n\t\t\tID: 50654,\n\t\t\tName: \"Herman Schaaf\",\n\t\t\tAvatar: \"https://sqwiggle-assets.s3.amazonaws.com/assets/api/heart.png\",\n\t\t\tType: TypeUser,\n\t\t\tSupport: false,\n\t\t},\n\t\tAttachments: []Attachment{\n\t\t\t{\n\t\t\t\tID: 206099,\n\t\t\t\tType: TypeImage,\n\t\t\t\tURL: \"https://api.sqwiggle.com/attachments/206099/view\",\n\t\t\t\tTitle: \"gophercolor.png\",\n\t\t\t\tDescription: \"\",\n\t\t\t\tImage: \"https://sqwiggle-assets.s3.amazonaws.com/assets/api/lightning.png\",\n\t\t\t\tCreatedAt: time.Date(2015, time.February, 5, 13, 23, 8, 115000000, time.UTC),\n\t\t\t\tUpdatedAt: time.Date(2015, time.February, 5, 13, 23, 11, 163000000, time.UTC),\n\t\t\t\tAnimated: false,\n\t\t\t\tStatus: \"uploaded\",\n\t\t\t\tWidth: 3861,\n\t\t\t\tHeight: 3861,\n\t\t\t},\n\t\t},\n\t\tMentions: []Mention{},\n\t\tCreatedAt: time.Date(2015, time.February, 5, 13, 23, 8, 111000000, time.UTC),\n\t\tUpdatedAt: time.Date(2015, time.February, 5, 13, 23, 8, 111000000, time.UTC),\n\t}\n\n\tdiff, err := compare(m, want)\n\tif err != nil {\n\t\tt.Fatal(\"Failed to compare structs:\", err)\n\t}\n\tfor k, d := range diff {\n\t\tt.Errorf(\"%q: got %q, want %q\", k, d.a, d.b)\n\t}\n}",
"func TestLargeSend(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[[]byte, []byte]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[[]byte, []byte]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[[]byte]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Generate large messages\n\tlargeMessageFromServerLength := rand.Int() % 65536\n\tlargeMessageFromServer := make([]byte, largeMessageFromServerLength)\n\tn, err := rand.Read(largeMessageFromServer)\n\tassertNoErr(err, t)\n\tassertEq(n, largeMessageFromServerLength, t)\n\tfmt.Printf(\"Generated large message from server (%d bytes)\\n\", largeMessageFromServerLength)\n\tlargeMessageFromClientLength := rand.Int() % 32768\n\tlargeMessageFromClient := make([]byte, largeMessageFromClientLength)\n\tn, err = rand.Read(largeMessageFromClient)\n\tassertNoErr(err, t)\n\tassertEq(n, largeMessageFromClientLength, t)\n\tfmt.Printf(\"Generated large message from client (%d bytes)\\n\", largeMessageFromClientLength)\n\n\t// Send large message to client\n\terr = server.Send(largeMessageFromServer)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive large message from server\n\tdataFromServer := <-clientEvent\n\tassertEq(dataFromServer, ClientEvent[[]byte]{\n\t\tEventType: ClientReceive,\n\t\tData: largeMessageFromServer,\n\t}, t)\n\n\t// Send large message to server\n\terr = client.Send(largeMessageFromClient)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive large message from client\n\tdataFromClient := <-serverEvent\n\tassertEq(dataFromClient, ServerEvent[[]byte]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 0,\n\t\tData: largeMessageFromClient,\n\t}, t)\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[[]byte]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}",
"func TestSendsRequests(t *testing.T) {\n\texpected := 33\n\n\tticker := make(chan time.Time)\n\tcodes := make(chan []byte, 0)\n\tf := &FakeTTY{c: codes}\n\tr := &NullReporter{}\n\tu := &NullUploader{}\n\tgo loop(ticker, f, r, u)\n\n\tticker <- time.Now()\n\n\t// allow 10 seconds to receive data\n\twait := time.Tick(10 * time.Second)\n\n\tvar i = 0\nX:\n\tfor {\n\t\tselect {\n\t\tcase c := <-codes:\n\t\t\tt.Logf(\"% x\\n\", c)\n\t\t\ti++\n\t\t\tif i == expected {\n\t\t\t\tbreak X\n\t\t\t}\n\t\tcase <-wait:\n\t\t\tt.Fatalf(\"expected %d codes but received %d\\n\", expected, i)\n\t\t}\n\t}\n}",
"func TestAsynchronousChat(t *testing.T) {\r\n\r\n\talice := NewChatter()\r\n\tbob := NewChatter()\r\n\tSkipOnError(t, DoHandshake(t, alice, bob))\r\n\r\n\tif VERBOSE {\r\n\t\tfmt.Println(\"\\n-------------------------------\")\r\n\t\tfmt.Println(\"Starting short asynchronous test\")\r\n\t\tfmt.Printf(\"-------------------------------\\n\\n\")\r\n\t}\r\n\r\n\taliceQueue := make([]*Message, 5)\r\n\tbobQueue := make([]*Message, 5)\r\n\r\n\tc := make(map[PublicKey]*Chatter)\r\n\tc[alice.Identity.PublicKey] = alice\r\n\tc[bob.Identity.PublicKey] = bob\r\n\r\n\tSendQueuedMessage(t, bobQueue, 1, alice, bob, \"AB.1\")\r\n\tSendQueuedMessage(t, bobQueue, 2, alice, bob, \"AB.2\")\r\n\tSendQueuedMessage(t, bobQueue, 3, alice, bob, \"AB.3\")\r\n\tSendQueuedMessage(t, aliceQueue, 1, bob, alice, \"BA.1\")\r\n\tSendQueuedMessage(t, aliceQueue, 2, bob, alice, \"BA.2\")\r\n\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, aliceQueue, 2, false))\r\n\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, aliceQueue, 1, false))\r\n\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, bobQueue, 3, false))\r\n\r\n\tSendQueuedMessage(t, aliceQueue, 3, bob, alice, \"BA.3\")\r\n\tSendQueuedMessage(t, aliceQueue, 4, bob, alice, \"BA.4\")\r\n\tSendQueuedMessage(t, bobQueue, 4, alice, bob, \"AB.4\")\r\n\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, aliceQueue, 4, false))\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, aliceQueue, 3, false))\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, bobQueue, 4, false))\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, bobQueue, 2, false))\r\n\r\n\tFailOnError(t, DeliverQueuedMessage(t, c, bobQueue, 1, false))\r\n\r\n\tif _, err := bob.ReceiveMessage(bobQueue[1]); err == nil {\r\n\t\tt.Fatal(\"Accepted replay of late message without error\")\r\n\t}\r\n}",
"func verifyReceive(t *testing.T, ch IPCChannel, messages []string, name string, done chan bool) {\n\n\t//timer := time.After(5 * time.Second)\n\tfor _, testMsg := range messages {\n\t\tmsg := <-ch.GetMessage()\n\t\tlogger.Infof(\"%v received message: %v\", name, msg)\n\t\tassert.Equal(t, testMsg, msg)\n\n\t}\n\tdone <- true\n}",
"func TestSendSMS(t *testing.T) {\n\tmsg, err := TheClient.SendSMS(TheFromPhoneNumber, TheToPhoneNumber, \"Hello, world!\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed: %s\", err.Error())\n\t}\n\tbs, err := json.MarshalIndent(msg, \"\", \" \")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed: %s\", err.Error())\n\t}\n\tt.Logf(\"Message Sent:\\n%s\\n\", string(bs))\n\n}",
"func Test2ClientChatUsingMultipleClients(t *testing.T) {\n\n\t//code to create random string for the datapoint\n\tconst charset = \"abcdefghijklmnopqrstuvwxyz\" +\n\t\t\"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\tconst numclients = 100\n\tconst datapoints = 2\n\n\tvar seededRand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()))\n\n\tstringmaker := func(length int, charset string) string {\n\t\tb := make([]byte, length)\n\t\tfor i := range b {\n\t\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t\t}\n\t\treturn string(b)\n\t}\n\tgetstring := func(length int) string {\n\t\treturn stringmaker(length, charset)\n\t}\n\n\tstart := time.Now()\n\tdata := make([]string, datapoints)\n\tdone := make(chan bool, numclients)\n\t//start all the clients and pass the two msgs to each of them to broadcast\n\tfor i := 0; i < numclients; i++ {\n\t\tfor j := 0; j < datapoints; j++ {\n\t\t\tdata[j] = getstring(10)\n\t\t}\n\t\tgo startClient(data, done)\n\t}\n\t//wait for all the clients to finish\n\tcount := 0\n\tfor i := 1; i <= numclients; i++ {\n\t\t<-done\n\t\tcount = count + 1\n\t}\n\tassertEquals(t, count, numclients)\n\telapsed := time.Since(start)\n\t//measure the elapsed time\n\tfmt.Println(elapsed)\n}",
"func (m *MBProvider) doSend(bm *model.BaseMessage) Response {\n\t// create a composite message from provided message text\n\tcm := util.NewCompositeMessage(bm.Message)\n\n\ttotalparts := len(cm.MessageParts)\n\toriginator := bm.Originator\n\trecipients := []string{bm.Recipient}\n\n\t// allow only one call per second towards the external SMS API\n\tthrottle := time.Tick(1 * time.Second)\n\n\t// total number of sent message parts\n\tvar totalSent int32\n\n\t// use a WaitGroup to block until all the message parts are sent\n\tvar wg sync.WaitGroup\n\tfor _, mpart := range cm.MessageParts {\n\t\t// rate limit SMS sending\n\t\t<-throttle\n\n\t\t// increment the waitgroup counter\n\t\twg.Add(1)\n\t\t// launch a goroutine to send the message part\n\t\tgo func(message *util.Message) {\n\t\t\t// decrement the counter when the goroutine completem.\n\t\t\tdefer wg.Done()\n\n\t\t\tvar params *messagebird.MessageParams\n\t\t\t// attach UDH parameter only for concatenated sms messages\n\t\t\tif totalparts > 1 {\n\t\t\t\tparams = &messagebird.MessageParams{\n\t\t\t\t\tType: \"binary\",\n\t\t\t\t\tTypeDetails: messagebird.TypeDetails{\n\t\t\t\t\t\t\"udh\": message.Header.String(),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// perform actual sending\n\t\t\t_, err := m.Client.NewMessage(originator, recipients, message.Body, params)\n\t\t\tif err != nil {\n\t\t\t\t// TODO collect errors and insert them into the response\n\t\t\t\tlog.Print(\"Error: \", err)\n\t\t\t}\n\n\t\t\tatomic.AddInt32(&totalSent, 1)\n\t\t}(mpart)\n\t}\n\n\t// wait for all parts to be sent\n\twg.Wait()\n\n\tif int(totalSent) != len(cm.MessageParts) {\n\t\treturn map[string]interface{}{\n\t\t\t\"Status\": \"Failed\",\n\t\t}\n\t}\n\n\treturn map[string]interface{}{\n\t\t\"Status\": \"Success\",\n\t\t\"TotalSentParts\": totalSent,\n\t}\n}",
"func TestParseTcp_allTestMessages(t *testing.T) {\n\tdns := newDns(testing.Verbose())\n\tfor _, q := range messagesTcp {\n\t\tt.Logf(\"Testing with query for %s\", q.q_name)\n\t\tparseTcpRequestResponse(t, dns, q)\n\t}\n}",
"func (c *Client) Send(data []byte) {\n\tn, err := c.conn.Write(data)\n\tfor i := 0; i < 5; i++ {\n\t\t//c.conn.Write([]byte(\"Hey \" + string(i) + \" FRANKIE\"))\n\t}\n\tlog.Printf(\"Wrote %d bytes (%s), sent to %s\", n, err, c.clientID)\n}",
"func TestSynchronousChatExtended(t *testing.T) {\r\n\t/*\r\n\t\tif testing.Short() {\r\n\t\t\tt.Skip(\"Skipping extended text in short mode.\")\r\n\t\t}\r\n\r\n\t\tchatters, err := SetupChatters(t, EXTENDED_TEST_PARTICIPANTS)\r\n\t\tSkipOnError(t, err)\r\n\r\n\t\tif VERBOSE {\r\n\t\t\tfmt.Println(\"\\n-------------------------------\")\r\n\t\t\tfmt.Printf(\"Starting extended synchronous testing, %d participants, %d rounds\\n\",\r\n\t\t\t\tEXTENDED_TEST_PARTICIPANTS,\r\n\t\t\t\tEXTENDED_TEST_ROUNDS)\r\n\t\t\tfmt.Printf(\"-------------------------------\\n\\n\")\r\n\t\t}\r\n\r\n\t\tfor i := 0; i < EXTENDED_TEST_ROUNDS; i++ {\r\n\r\n\t\t\tc1 := chatters[rand.Int()%len(chatters)]\r\n\t\t\tc2 := chatters[rand.Int()%len(chatters)]\r\n\t\t\tif c1 == c2 {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tm := fmt.Sprintf(\"M%d\", i)\r\n\t\t\tif VERBOSE {\r\n\t\t\t\tfmt.Printf(\"Message \\\"%s\\\" to be delivered from %s to %s\\n\",\r\n\t\t\t\t\tm,\r\n\t\t\t\t\tPrintHandle(&c1.Identity.PublicKey),\r\n\t\t\t\t\tPrintHandle(&c2.Identity.PublicKey))\r\n\t\t\t}\r\n\r\n\t\t\tFailOnError(t, CheckSendReceive(t, c1, c2, m))\r\n\t\t}\r\n\r\n\t*/\r\n}",
"func TestOneMessagePerRequest(t *testing.T) {\n\tt.Setenv(envVarPostMessagesFrequency, \"10h\")\n\tt.Setenv(envVarPostMessagesBatchSize, \"1\")\n\tt.Setenv(envVarBufferMaximum, \"1\")\n\tt.Setenv(envVarStreamChannelSize, \"0\")\n\n\thec := NewHTTPEventCollectorMock(t)\n\n\tgo hec.Serve()\n\n\tinfo := logger.Info{\n\t\tConfig: map[string]string{\n\t\t\tsplunkURLKey: hec.URL(),\n\t\t\tsplunkTokenKey: hec.token,\n\t\t},\n\t\tContainerID: \"containeriid\",\n\t\tContainerName: \"/container_name\",\n\t\tContainerImageID: \"contaimageid\",\n\t\tContainerImageName: \"container_image_name\",\n\t}\n\n\tloggerDriver, err := New(info)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tif err := loggerDriver.Log(&logger.Message{Line: []byte(strconv.Itoa(i)), Source: \"stdout\", Timestamp: time.Now()}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\terr = loggerDriver.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(hec.messages) != 10 {\n\t\tt.Fatal(\"Not all messages delivered\")\n\t}\n\n\tfor i, message := range hec.messages {\n\t\tif event, err := message.EventAsMap(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else {\n\t\t\tif event[\"line\"] != strconv.Itoa(i) {\n\t\t\t\tt.Fatalf(\"Unexpected event in message %v\", event)\n\t\t\t}\n\t\t}\n\t}\n\n\t// 1 to verify connection and 10 messages\n\tif hec.numOfRequests != 11 {\n\t\tt.Fatalf(\"Unexpected number of requests %d\", hec.numOfRequests)\n\t}\n\n\terr = hec.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func TestSend(t *testing.T) {\n\t// Create server\n\tserver, serverEvent := NewServer[int, string]()\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\n\t// Start server\n\terr := server.Start(\"127.0.0.1\", 0)\n\tassertNoErr(err, t)\n\tassert(server.Serving(), t, \"Server should be serving\")\n\ttime.Sleep(waitTime)\n\n\t// Check server address info\n\thost, port, err := server.GetAddr()\n\tassertNoErr(err, t)\n\tassert(server.sock.Addr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Server address: %s:%d\\n\", host, port)\n\n\t// Create client\n\tclient, clientEvent := NewClient[string, int]()\n\tassert(!client.Connected(), t, \"Client should not be connected\")\n\n\t// Connect to server\n\terr = client.Connect(host, port)\n\tassertNoErr(err, t)\n\tassert(client.Connected(), t, \"Client should be connected\")\n\ttime.Sleep(waitTime)\n\n\t// Check client address info\n\thost, port, err = client.GetAddr()\n\tassertNoErr(err, t)\n\tassert(client.sock.LocalAddr().String() == host+\":\"+strconv.Itoa(int(port)), t, \"Address strings don't match\")\n\tfmt.Printf(\"Client address: %s:%d\\n\", host, port)\n\n\t// Check connect event was received\n\tclientConnectEvent := <-serverEvent\n\tassertEq(clientConnectEvent, ServerEvent[string]{\n\t\tEventType: ServerConnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Send message to client\n\tmessageFromServer := 29275\n\terr = server.Send(messageFromServer)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from server\n\tclientReceiveEvent1 := <-clientEvent\n\tassertEq(clientReceiveEvent1, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: messageFromServer,\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Send message to server\n\tmessageFromClient := \"Hello, server!\"\n\terr = client.Send(messageFromClient)\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive message from client\n\tserverReceiveEvent := <-serverEvent\n\tassertEq(serverReceiveEvent, ServerEvent[string]{\n\t\tEventType: ServerReceive,\n\t\tClientID: 0,\n\t\tData: messageFromClient,\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Send response to client\n\terr = server.Send(len(serverReceiveEvent.Data))\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Receive response from server\n\tclientReceiveEvent2 := <-clientEvent\n\tassertEq(clientReceiveEvent2, ClientEvent[int]{\n\t\tEventType: ClientReceive,\n\t\tData: len(messageFromClient),\n\t}, t)\n\ttime.Sleep(waitTime)\n\n\t// Disconnect from server\n\terr = client.Disconnect()\n\tassertNoErr(err, t)\n\ttime.Sleep(waitTime)\n\n\t// Check disconnect event was received\n\tclientDisconnectEvent := <-serverEvent\n\tassertEq(clientDisconnectEvent, ServerEvent[string]{\n\t\tEventType: ServerDisconnect,\n\t\tClientID: 0,\n\t}, t)\n\n\t// Stop server\n\terr = server.Stop()\n\tassertNoErr(err, t)\n\tassert(!server.Serving(), t, \"Server should not be serving\")\n\ttime.Sleep(waitTime)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
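The next document, TestSendingCustomTypes, builds values of a custom type that is not defined in this excerpt. Judging only from the struct literals in the test (fields A, B, C), a plausible definition, stated as an assumption rather than the package's actual declaration, would be:

// Hypothetical definition of the custom payload type, inferred from the
// struct literals in the test below; not taken from the source package.
type custom struct {
	A int      // e.g. 123 / 456 in the test
	B string   // greeting text
	C []string // list of items
}

For the generic Send calls to round-trip such a value, the type presumably has to be handled by whatever codec the client and server use (for example encoding/json or encoding/gob), which exported fields like these satisfy.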
Test sending custom types | func TestSendingCustomTypes(t *testing.T) {
// Create server
server, serverEvent := NewServer[custom, custom]()
assert(!server.Serving(), t, "Server should not be serving")
// Start server
err := server.Start("127.0.0.1", 0)
assertNoErr(err, t)
assert(server.Serving(), t, "Server should be serving")
time.Sleep(waitTime)
// Check server address info
host, port, err := server.GetAddr()
assertNoErr(err, t)
assert(server.sock.Addr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Server address: %s:%d\n", host, port)
// Create client
client, clientEvent := NewClient[custom, custom]()
assert(!client.Connected(), t, "Client should not be connected")
// Connect to server
err = client.Connect(host, port)
assertNoErr(err, t)
assert(client.Connected(), t, "Client should be connected")
time.Sleep(waitTime)
// Check client address info
host, port, err = client.GetAddr()
assertNoErr(err, t)
assert(client.sock.LocalAddr().String() == host+":"+strconv.Itoa(int(port)), t, "Address strings don't match")
fmt.Printf("Client address: %s:%d\n", host, port)
// Check connect event was received
clientConnectEvent := <-serverEvent
assertEq(clientConnectEvent, ServerEvent[custom]{
EventType: ServerConnect,
ClientID: 0,
}, t)
// Messages
serverMessage := custom{
A: 123,
B: "Hello, custom server class!",
C: []string{"first server item", "second server item"},
}
clientMessage := custom{
A: 456,
B: "Hello, custom client class!",
C: []string{"#1 client item", "client item #2", "(3) client item"},
}
// Send message to client
err = server.Send(clientMessage)
assertNoErr(err, t)
time.Sleep(waitTime)
// Receive message from server
clientReceiveEvent1 := <-clientEvent
assertEq(clientReceiveEvent1, ClientEvent[custom]{
EventType: ClientReceive,
Data: clientMessage,
}, t)
time.Sleep(waitTime)
// Send message to server
err = client.Send(serverMessage)
assertNoErr(err, t)
time.Sleep(waitTime)
// Receive message from client
serverReceiveEvent := <-serverEvent
assertEq(serverReceiveEvent, ServerEvent[custom]{
EventType: ServerReceive,
ClientID: 0,
Data: serverMessage,
}, t)
time.Sleep(waitTime)
// Disconnect from server
err = client.Disconnect()
assertNoErr(err, t)
time.Sleep(waitTime)
// Check disconnect event was received
clientDisconnectEvent := <-serverEvent
assertEq(clientDisconnectEvent, ServerEvent[custom]{
EventType: ServerDisconnect,
ClientID: 0,
}, t)
// Stop server
err = server.Stop()
assertNoErr(err, t)
assert(!server.Serving(), t, "Server should not be serving")
time.Sleep(waitTime)
} | [
"func TestTypeSerializations(t *testing.T) {\n\ttype T struct{}\n\t// Expected buffer from serialization\n\tvar expectedBuffer = []byte{5, 0, 0, 0, 0}\n\t// Serialize tests\n\tSerializeTest(t, NewDocument(), expectedBuffer)\n\tSerializeTest(t, &T{}, expectedBuffer)\n}",
"func TestTypeConversionOperarion(t *testing.T) {\n\tConvey(\"TestTypeConversionOperarion\", t, func() {\n\t\tConvey(\"it should conversite type\", func() {\n\t\t\t// human 是一个type, 她可以接收一个不是 human 类型的数据\n\t\t\t// 然后将其转为 human\n\t\t\tSo(human(Dqh), ShouldEqual, \"dengqinghua\")\n\t\t\tSo(reflect.TypeOf(human(Dqh)).String(), ShouldEqual, \"golang.human\")\n\t\t})\n\t})\n}",
"func TestCustomMarshaller_Issue96(t *testing.T) {\n\ttype Vote struct {\n\t\tWhat YesNo\n\t}\n\ttestJsonFromStruct(t, Vote{}, `{\n \"swagger.Vote\": {\n \"id\": \"swagger.Vote\",\n \"required\": [\n \"What\"\n ],\n \"properties\": {\n \"What\": {\n \"type\": \"string\"\n }\n }\n }\n }`)\n}",
"func TestCustomDataType(t *testing.T) {\n\tuuid := &UUIDv4{}\n\n\tdt := DataTypeFromType(reflect.TypeOf(uuid))\n\tassert.NotNil(t, dt)\n\n\tif v, ok := dt.(DataType); ok {\n\t\tassert.Equal(t, \"uuid\", v.Format())\n\t\tassert.Equal(t, \"string\", v.Type())\n\t} else {\n\t\tt.Error(\"expected type to implements the DataType interface\")\n\t}\n}",
"func (s *LrpcTestSuite) TestUnsupportedType() {\n\tt := s.T()\n\n\tt.Log(\"Test using unsupported type\")\n\n\tcl := s.setupClient()\n\tdefer cl.Close()\n\n\tvar req []interface{}\n\n\treq = append(req, uint32(123))\n\treq = append(req, 1.234)\n\n\tres, err := DoRequest(cl, MsgEcho, req)\n\ts.Assert().ErrorIs(err, ErrLrpc2TypeNotSupported)\n\ts.Assert().Nil(res, \"No result expected\")\n\n}",
"func TestPrimitiveTypes(t *testing.T) {\n\ttype Prims struct {\n\t\tf float64\n\t\tt time.Time\n\t}\n\ttestJsonFromStruct(t, Prims{}, `{\n \"swagger.Prims\": {\n \"id\": \"swagger.Prims\",\n \"required\": [\n \"f\",\n \"t\"\n ],\n \"properties\": {\n \"f\": {\n \"type\": \"number\",\n \"format\": \"double\"\n },\n \"t\": {\n \"type\": \"string\",\n \"format\": \"date-time\"\n }\n }\n }\n }`)\n}",
"func TestMsgClaimHTLCType(t *testing.T) {\n\tmsg := types.NewMsgClaimHTLC(senderStr, idStr, secret.String())\n\trequire.Equal(t, \"claim_htlc\", msg.Type())\n}",
"func AddTypeTest(stringValue string, boolValue bool, intValue int, int8Value int8, int16Value int16, int32Value int32, int64Value int64, uintValue uint, uint8Value uint8, uint16Value uint16, uint32Value uint32, uint64Value uint64, float32Value float32, float64Value float64, dateValue sheetdb.Date, datetimeValue sheetdb.Datetime, customValue Sex, pBoolValue *bool, pIntValue *int, pInt8Value *int8, pInt16Value *int16, pInt32Value *int32, pInt64Value *int64, pUIntValue *uint, pUInt8Value *uint8, pUInt16Value *uint16, pUInt32Value *uint32, pUInt64Value *uint64, pFloat32Value *float32, pFloat64Value *float64, pDateValue *sheetdb.Date, pDatetimeValue *sheetdb.Datetime, pCustomValue *Sex) (*TypeTest, error) {\n\t_TypeTest_mutex.Lock()\n\tdefer _TypeTest_mutex.Unlock()\n\tif err := _TypeTest_validateStringValue(stringValue); err != nil {\n\t\treturn nil, err\n\t}\n\ttypeTest := &TypeTest{\n\t\tID: _TypeTest_maxRowNo + 1,\n\t\tStringValue: stringValue,\n\t\tBoolValue: boolValue,\n\t\tIntValue: intValue,\n\t\tInt8Value: int8Value,\n\t\tInt16Value: int16Value,\n\t\tInt32Value: int32Value,\n\t\tInt64Value: int64Value,\n\t\tUintValue: uintValue,\n\t\tUint8Value: uint8Value,\n\t\tUint16Value: uint16Value,\n\t\tUint32Value: uint32Value,\n\t\tUint64Value: uint64Value,\n\t\tFloat32Value: float32Value,\n\t\tFloat64Value: float64Value,\n\t\tDateValue: dateValue,\n\t\tDatetimeValue: datetimeValue,\n\t\tCustomValue: customValue,\n\t\tPBoolValue: pBoolValue,\n\t\tPIntValue: pIntValue,\n\t\tPInt8Value: pInt8Value,\n\t\tPInt16Value: pInt16Value,\n\t\tPInt32Value: pInt32Value,\n\t\tPInt64Value: pInt64Value,\n\t\tPUintValue: pUIntValue,\n\t\tPUint8Value: pUInt8Value,\n\t\tPUint16Value: pUInt16Value,\n\t\tPUint32Value: pUInt32Value,\n\t\tPUint64Value: pUInt64Value,\n\t\tPFloat32Value: pFloat32Value,\n\t\tPFloat64Value: pFloat64Value,\n\t\tPDateValue: pDateValue,\n\t\tPDatetimeValue: pDatetimeValue,\n\t\tPCustomValue: pCustomValue,\n\t}\n\tif err := typeTest._asyncAdd(_TypeTest_maxRowNo + 1); err != nil {\n\t\treturn nil, err\n\t}\n\t_TypeTest_maxRowNo++\n\t_TypeTest_cache[typeTest.ID] = typeTest\n\t_TypeTest_rowNoMap[typeTest.ID] = _TypeTest_maxRowNo\n\treturn typeTest, nil\n}",
"func TestRegistryReRegisterAnyType(t *testing.T) {\n\tassert.Panics(t,\n\t\tfunc() {\n\t\t\tmsgs.RegisterMessageType(base.AnyTypeName, []msgs.Representation{msgs.JSONRepresentation}, func() msgs.Message {\n\t\t\t\treturn base.NewAnyMessage(map[string]interface{}{})\n\t\t\t})\n\t\t},\n\t\t\"It should panic\",\n\t)\n}",
"func (p *Gopactor) ShouldSendType(param1 interface{}, params ...interface{}) string {\n\tsender, ok := param1.(*actor.PID)\n\tif !ok {\n\t\treturn \"Sender is not an actor PID\"\n\t}\n\n\t// If there is only one argument than it's the message to assert\n\tif len(params) != 1 {\n\t\treturn \"One parameter with a message is required to assert sending\"\n\t}\n\n\texpectedMsgType := params[0].(reflect.Type)\n\n\treturn p.shouldSendType(sender, nil, expectedMsgType)\n}",
"func RegisterTestNodeTypes() {\n\tnode.GetRegistry().RegisterSourceType(\"simplesource\", func() node.Source {\n\t\treturn &SimpleSource{}\n\t}, reflect.TypeOf(([]byte)(nil)))\n\n\tnode.GetRegistry().RegisterNodeType(\"filternode\", func() node.Node {\n\t\treturn &FilterNode{}\n\t}, reflect.TypeOf(([]byte)(nil)), reflect.TypeOf(\"\"))\n\n\tnode.GetRegistry().RegisterNodeType(\"errornode\", func() node.Node {\n\t\treturn &ErrorNode{}\n\t}, reflect.TypeOf(\"\"), reflect.TypeOf(\"\"))\n\n\tnode.GetRegistry().RegisterNodeType(\"errorhandlernode\", func() node.Node {\n\t\treturn &ErrorHandlerNode{}\n\t}, reflect.TypeOf(&firebolt.EventError{}), nil)\n\n\tnode.GetRegistry().RegisterNodeType(\"resultsnode\", func() node.Node {\n\t\treturn &ResultsNode{}\n\t}, reflect.TypeOf(\"\"), reflect.TypeOf(\"\"))\n\n\tnode.GetRegistry().RegisterNodeType(\"slownode\", func() node.Node {\n\t\treturn &SlowNode{}\n\t}, reflect.TypeOf(\"\"), reflect.TypeOf(\"\"))\n\n\tnode.GetRegistry().RegisterNodeType(\"stringtoproducerequestnode\", func() node.Node {\n\t\treturn &StringToProduceRequestNode{}\n\t}, reflect.TypeOf(\"\"), reflect.TypeOf((*firebolt.ProduceRequest)(nil)).Elem())\n\n\tnode.GetRegistry().RegisterNodeType(\"asyncfilternode\", func() node.Node {\n\t\treturn &AsyncFilterNode{}\n\t}, reflect.TypeOf(([]byte)(nil)), reflect.TypeOf(\"\"))\n\n\tnode.GetRegistry().RegisterNodeType(\"indexrequestbuildernode\", func() node.Node {\n\t\treturn &IndexRequestBuilderNode{}\n\t}, reflect.TypeOf(\"\"), reflect.TypeOf(elasticsearch.IndexRequest{}))\n}",
"func testTypeSwitches() {\n\tdo(21)\n\tdo(\"hello\")\n\tdo(true)\n}",
"func TestMsgCreateHTLCType(t *testing.T) {\n\tmsg := types.NewMsgCreateHTLC(senderStr, recipientStr, receiverOnOtherChain, senderOnOtherChain, amount, hashLockStr, timestamp, timeLock, notTransfer)\n\trequire.Equal(t, \"create_htlc\", msg.Type())\n}",
"func TestParsedTypes(t *testing.T) {\n\tconst (\n\t\tknownType = 1\n\t\tunknownType = 3\n\t\tsecondKnownType = 4\n\t)\n\n\ttests := []parsedTypeTest{\n\t\t{\n\t\t\tname: \"known and unknown\",\n\t\t\tencode: []tlv.Type{knownType, unknownType},\n\t\t\tdecode: []tlv.Type{knownType},\n\t\t\texpParsedTypes: tlv.TypeMap{\n\t\t\t\tunknownType: []byte{0, 0, 0, 0, 0, 0, 0, 0},\n\t\t\t\tknownType: nil,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"known and missing known\",\n\t\t\tencode: []tlv.Type{knownType},\n\t\t\tdecode: []tlv.Type{knownType, secondKnownType},\n\t\t\texpParsedTypes: tlv.TypeMap{\n\t\t\t\tknownType: nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\ttestParsedTypes(t, test)\n\t\t})\n\t}\n}",
"func TestTypeUsage(t *testing.T) {\n\tConvey(\"TestTypeUsage\", t, func() {\n\t\tConvey(\"the Dqh type should be a boy\", func() {\n\t\t\tSo(reflect.TypeOf(Dqh).Kind(), ShouldEqual, reflect.String)\n\t\t\tSo(reflect.TypeOf(Dqh).String(), ShouldEqual, \"golang.boy\")\n\n\t\t\tSo(Dqh, ShouldEqual, \"dengqinghua\")\n\t\t\tSo(a, ShouldEqual, 3)\n\t\t})\n\t})\n}",
"func TestPostAddNewShapeInvaliddto(t *testing.T) {\n request := createPostAddNewShapeRequest()\n request.dto = invalidizeTestParamValue(request.dto, \"dto\", \"interface{}\").(interface{})\n e := initializeTest(\"PostAddNewShape\", \"dto\", request.dto)\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n r, _, e := getTestApiClient().ShapesApi.PostAddNewShape(request)\n assertError(t, \"PostAddNewShape\", \"dto\", r.Code, e)\n}",
"func RTTest(t *testing.T) {\n\t//var msgType *ros.DynamicMessageType\n\tvar err error\n\n\t//Instantiate a new dynamic message type\n\tmsgType, err := ros.NewDynamicMessageType(\"geometry_msgs/Twist\")\n\tif err != nil {\n\t\tt.Error(\"failed to get message definition; \", err)\n\t\treturn\n\t}\n\t//Instantiate the sub message. This is not necessary for operation,\n\t// but for us to manually setup data to test the main message type\n\tnestedMsgType, err := ros.NewDynamicMessageType(\"geometry_msgs/Vector3\")\n\tif err != nil {\n\t\tt.Error(\"failed to get message definition; \", err)\n\t\treturn\n\t}\n\t//Example JSON payload, Marshaled JSON\n\texamplePayload := `{\"angular\":{\"x\":1,\"y\":2,\"z\":3},\"linear\":{\"x\":1,\"y\":2,\"z\":3}}`\n\n\t//Declaring example bytes taken from external ROS source\n\trawmsg := \"000000000000f03f00000000000000400000000000000840000000000000f03f00000000000000400000000000000840\"\n\texampleBytes, err := hex.DecodeString(rawmsg)\n\n\t//Example message data\n\texampleMsg := \"geometry_msgs/Twist::map[angular:geometry_msgs/Vector3::map[x:1.00000 y:2.00000 z:3.00000] linear:geometry_msgs/Vector3::map[x:1.00000 y:2.00000 z:3.00000]]\"\n\n\t//Example schema\n\texampleSchema := `{\"$id\":\"/ros/testy\",\"$schema\":\"https://json-schema.org/draft-07/schema#\",\"properties\":{\"x\":{\"title\":\"/ros/testy/x\",\"type\":\"number\"},\"y\":{\"title\":\"/ros/testy/y\",\"type\":\"number\"},\"z\":{\"title\":\"/ros/testy/z\",\"type\":\"number\"}},\"title\":\"/ros/testy\",\"type\":\"object\"}`\n\t//Generating a schema for geometry_msgs/Vector3 on topic chatty\n\tschema, err := nestedMsgType.GenerateJSONSchema(\"/ros/\", \"testy\")\n\tif err != nil {\n\t\tt.Error(\"failed to get generate JSON schema; \", err)\n\t\treturn\n\t}\n\t//Converting json schema into string for comparison to input schema\n\trosgoSchema := string(schema)\n\n\t//Creating new message instances of the message types to be used for serialization/deserialization tests\n\tdynamicMsg := msgType.NewMessage().(*ros.DynamicMessage)\n\tdynamicBlankMsg := msgType.NewMessage().(*ros.DynamicMessage)\n\treturnMsg := msgType.NewMessage().(*ros.DynamicMessage)\n\tnestedDynamicMsg := nestedMsgType.NewMessage().(*ros.DynamicMessage)\n\n\t//Declaring some sample data for serialization\n\td := dynamicMsg.Data()\n\td2 := nestedDynamicMsg.Data()\n\td2[\"x\"] = ros.JsonFloat64{F: 1.0}\n\td2[\"y\"] = ros.JsonFloat64{F: 2.0}\n\td2[\"z\"] = ros.JsonFloat64{F: 3.0}\n\td[\"angular\"] = nestedDynamicMsg\n\td[\"linear\"] = nestedDynamicMsg\n\n\t//Using UnmarshalJSON method on a set of example bytes to compare with example Message\n\terr = dynamicBlankMsg.UnmarshalJSON([]byte(examplePayload))\n\n\t//Serializing message into bytes buffer\n\tvar buf bytes.Buffer\n\terr = dynamicMsg.Serialize(&buf)\n\tif err != nil {\n\t\tt.Error(\"failed to serialize message; \", err)\n\t\treturn\n\t}\n\trosgoBytes := buf.Bytes()\n\n\tvar buf2 bytes.Buffer\n\terr = dynamicBlankMsg.Serialize(&buf2)\n\tif err != nil {\n\t\tt.Error(\"failed to serialize message; \", err)\n\t\treturn\n\t}\n\tjsonBytes := buf2.Bytes()\n\n\t//Deserializing message into bytes reader\n\treader := bytes.NewReader(buf.Bytes())\n\terr = returnMsg.Deserialize(reader)\n\tif err != nil {\n\t\tt.Error(\"failed to deserialize message; \", err)\n\t}\n\trosgoMsg := fmt.Sprintf(\"%v\", returnMsg)\n\n\t//Using MarshalJSON method on dynamic message to create JSON payload\n\tpayloadMsg, err := dynamicMsg.MarshalJSON()\n\tif err != nil {\n\t\tt.Error(\"failed to marshal JSON; \", 
err)\n\t\treturn\n\t}\n\t//Convert to string and compare to example JSON payload\n\trosgoPayload := fmt.Sprintf(\"%s\", payloadMsg)\n\tif rosgoPayload != examplePayload {\n\t\tt.Error(\"marshalled JSON incorrect: \" + rosgoPayload + \" vs \" + examplePayload)\n\t\treturn\n\t}\n\n\t//Comparing byte slice arrays to check Serialization worked\n\tres := bytes.Compare(exampleBytes, rosgoBytes)\n\tif res != 0 {\n\t\tt.Error(\"Serialized Message incorrect: \" + string(rosgoBytes) + \" vs \" + string(exampleBytes))\n\t\treturn\n\t}\n\t//Comparing deserialized ros messages to check Deserialization worked\n\tif rosgoMsg != exampleMsg {\n\t\tt.Error(\"Deserialized message incorrect: \" + rosgoMsg + \" vs \" + exampleMsg)\n\t\treturn\n\t}\n\t//Comparing unmarshalled payload to check unmarshalJSON worked\n\tres = bytes.Compare(jsonBytes, rosgoBytes)\n\tif res != 0 {\n\t\tt.Error(\"Unmarshalled message incorrect; \", err)\n\t\treturn\n\t}\n\t//Comparing json schema to example schema to check GenerateJSONSchema worked\n\tif rosgoSchema != exampleSchema {\n\t\tt.Error(\"JSON Schema information incorrect; \", err)\n\t\treturn\n\t}\n\treturn\n}",
"func cleanType(tc *TestCollector) {\n\t// intuit pod or command or invalid\n\tif tc.Type == \"\" {\n\t\t// assume command if cmd provided\n\t\tif tc.Cmd != \"\" {\n\t\t\ttc.Type = command\n\t\t} else {\n\t\t\ttc.Type = pod\n\t\t}\n\t}\n\ttc.Type = strings.ToLower(tc.Type)\n}",
"func testMessage(i int, f float64) *starlarkproto.Message {\n\tmsg := testMessageType.Message()\n\tif err := msg.SetField(\"i\", starlark.MakeInt(i)); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := msg.SetField(\"f\", starlark.Float(f)); err != nil {\n\t\tpanic(err)\n\t}\n\treturn msg\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |