query
stringlengths
8
6.75k
document
stringlengths
9
1.89M
negatives
listlengths
19
19
metadata
dict
GetDatapointByID fetches a datapoint from the database and returns it
func GetDatapointByID(id int64) (dp *Datapoint, err error) { var datasetID int64 var imageURL string err = DB.QueryRow("SELECT id, dataset_id, image_url FROM datapoint WHERE id=$1", id).Scan(&id, &datasetID, &imageURL) if err != nil { return // something went wrong! lets get out of here! } dp = &Datapoint{ ID: id, DatasetID: datasetID, ImageURL: imageURL, } return }
[ "func (c *MainController) getDataByID(PID int) {\n\to := orm.NewOrm()\n\tproduct := models.Product{PID: PID}\n\terr := o.Read(&product)\n\tif err != nil {\n\t\tbeego.Info(\"o.Read err=\", err)\n\t\treturn\n\t}\n\tc.Data[\"product\"] = product\n}", "func (db *PostgresDatapointRepo) CreateDatapoint(dp earthworks.Datapoint) (earthworks.Datapoint, error) {\n\tquery := `INSERT INTO datapoint (location) VALUES ($1) RETURNING id`\n\tcreated := earthworks.Datapoint{}\n\terr := db.Get(&created, query, wkt.MarshalString(dp.Location))\n\tif err != nil {\n\t\treturn earthworks.Datapoint{}, err\n\t}\n\n\treturn created, nil\n}", "func DefaultReadIntPoint(ctx context.Context, in *IntPoint, db *gorm1.DB) (*IntPoint, error) {\n\tif in == nil {\n\t\treturn nil, errors.New(\"Nil argument to DefaultReadIntPoint\")\n\t}\n\tdb = db.Set(\"gorm:auto_preload\", true)\n\tormParams, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormParams.Id == 0 {\n\t\treturn nil, errors.New(\"Read requires a non-zero primary key\")\n\t}\n\tormResponse := IntPointORM{}\n\tif err = db.Where(&ormParams).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func (ds *KVStorage) GetByID(ID string) (graph.Graph, bool) {\n\tif val, ok := ds.data[ID]; ok {\n\t\treturn val, ok\n\t}\n\treturn graph.Graph{}, false\n}", "func (dss *DataSources) GetById(id int64) *DataSource {\n\tif dss.l != nil {\n\t\tdss.l.RLock()\n\t\tdefer dss.l.RUnlock()\n\t}\n\treturn dss.byId[id]\n}", "func NewDatapoint(datasetID int64, imageURL string) (dp *Datapoint, err error) {\n\tvar id int64\n\n\terr = DB.QueryRow(\"INSERT INTO datapoint (dataset_id, image_url) VALUES ($1, $2) RETURNING id\", datasetID, imageURL).Scan(&id)\n\n\tif err != nil {\n\t\treturn // something went wrong! 
lets get out of here!\n\t}\n\n\t//blank space for readability\n\n\tdp = &Datapoint{\n\t\tID: id,\n\t\tDatasetID: datasetID,\n\t\tImageURL: imageURL,\n\t}\n\n\treturn\n}", "func (DiseaseUsecase *DiseaseUsecaseImpl) GetByID(id string) (*model.Disease, error) {\n\tDisease, err := DiseaseUsecase.DiseasetRepository.FindByID(id)\n\t//err:=UserUsecase.userRepository.FindByID(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Disease, nil\n}", "func (dao *LendingPairDao) GetByID(id bson.ObjectId) (*types.LendingPair, error) {\n\tvar response *types.LendingPair\n\terr := db.GetByID(dao.dbName, dao.collectionName, id, &response)\n\treturn response, err\n}", "func (ds *DataStore) GetByID(id, url string) (todos.Todo, error) {\n\tvar todo todos.Todo\n\trow, err := ds.DB.Get(ds.ctx, id, nil)\n\tif err != nil {\n\t\treturn todo, fmt.Errorf(\"error getting doc with ID %s: %s\", id, err)\n\t}\n\tvar doc todoDoc\n\tif err := row.ScanDoc(&doc); err != nil {\n\t\treturn todo, fmt.Errorf(\"error scanning doc: %s\", err)\n\t}\n\ttodo = convertDocToTodo(doc)\n\ttodo.URL = url\n\n\treturn todo, nil\n}", "func (item *Item) GetDataByID(id string) ([]*Item, FoulError) {\n\n\tusableID, _ := strconv.Atoi(id)\n\n\tdata, err := getItemByID(usableID)\n\treturn data, err\n}", "func (sp *serviceProvider) GetByID(conn orm.Connection, uid int32) (*Staff, error) {\n\tstaff := &Staff{}\n\n\tdb := conn.(*gorm.DB).Exec(\"USE staff\")\n\terr := db.Model(staff).Where(\"id = ? 
AND resigned = false\", uid).First(staff).Error\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn staff, nil\n}", "func (d Datapoints) DatapointAt(n int) Datapoint { return d[n] }", "func (repository *Repository) GetByID(id string) (*comments.Comment, error) {\n\treturn repository.data[id], nil\n}", "func (d DB) GetByID(string) (Profile, error) { return Profile{}, errors.New(\"stub\") }", "func (d *DataFromMem) GetOnePerson(ctx context.Context, in *api.Person) (*api.Person, error) {\n\tif !bson.IsObjectIdHex(in.Id) {\n\t\tlog.Println(\"Provided ID is invalid\")\n\n\t\treturn &api.Person{}, nil\n\t}\n\n\tif v, ok := d.Data[bson.ObjectIdHex(in.Id)]; ok {\n\t\tfmt.Println(\"Person sucessfully located.\")\n\n\t\treturn &api.Person{Id: in.Id, Name: v.Name, Age: v.Age, Profession: v.Profession, Node: d.ID}, nil\n\t}\n\tfmt.Println(\"Person not located.\")\n\treturn &api.Person{}, nil\n}", "func (d *Handle) GetFeedByID(id int64) (*FeedInfo, error) {\n\td.syncMutex.Lock()\n\tdefer d.syncMutex.Unlock()\n\tvar f FeedInfo\n\t//\terr := sqlx.Get(d.queryer, &f, \"SELECT * from feed_info WHERE id = ? LIMIT 1\", id)\n\terr := sqlx.Get(d.queryer, &f, \"SELECT * from feed_info WHERE id = ? LIMIT 1\", id)\n\treturn &f, err\n}", "func (db *Access) getSingleObjectFromID(id string) (datatypes.JS, error) {\r\n\tindexData, err := db.indexTable.Get(id)\r\n\tif err != nil {\r\n\t\t//TODO we might want to change this to a less dramatic error handler\r\n\t\t//Looking at the func above, a query for 100IDs (for example) will fail\r\n\t\t//if a single item is missing. 
Not good.\r\n\t\t//UPDATE: lol indeed I just got to that situation.\r\n\t\t//UPDATE: okkk deletion implemented, time to fix this.\r\n\t\treturn nil, errors.New(\"Object deleted or non-existent\")\r\n\t}\r\n\tdbData := db.readDbData(&indexData)\r\n\treturn util.GetJSON(dbData), nil\r\n}", "func (c *Client) DatacenterGetForUserByID(datacenterName string, userID int) (*Datacenter, error) {\n\treturn c.datacenterGetForUser(datacenterName, userID)\n}", "func (server *Server) GetUserPointByUserID(c *gin.Context) {\n\tuserID := c.Param(\"id\")\n\tconvertedUserID, err := strconv.ParseUint(userID, 10, 64)\n\tif err != nil {\n\t\tlog.Printf(err.Error())\n\t\terrList[\"invalid_request\"] = \"Invalid request\"\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"error\": errList,\n\t\t})\n\t\treturn\n\t}\n\n\tuserPoint := models.UserPoint{}\n\tuserPoints, err := userPoint.FindPointHistoryByUserID(server.DB, convertedUserID)\n\tif err != nil {\n\t\tlog.Printf(err.Error())\n\t\terrList[\"no_user\"] = \"No user found\"\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"error\": errList,\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"response\": userPoints,\n\t})\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewDeleteCoreV1NamespacedPodOK creates DeleteCoreV1NamespacedPodOK with default headers values
func NewDeleteCoreV1NamespacedPodOK() *DeleteCoreV1NamespacedPodOK { return &DeleteCoreV1NamespacedPodOK{} }
[ "func NewDeleteCoreV1NamespacedPodTemplateOK() *DeleteCoreV1NamespacedPodTemplateOK {\n\treturn &DeleteCoreV1NamespacedPodTemplateOK{}\n}", "func NewDeleteCoreV1NamespacedServiceOK() *DeleteCoreV1NamespacedServiceOK {\n\treturn &DeleteCoreV1NamespacedServiceOK{}\n}", "func NewCreateCoreV1NamespacedPodOK() *CreateCoreV1NamespacedPodOK {\n\treturn &CreateCoreV1NamespacedPodOK{}\n}", "func NewDeleteCoreV1NamespacedPodUnauthorized() *DeleteCoreV1NamespacedPodUnauthorized {\n\n\treturn &DeleteCoreV1NamespacedPodUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedEventOK() *DeleteCoreV1NamespacedEventOK {\n\treturn &DeleteCoreV1NamespacedEventOK{}\n}", "func NewDeleteCoreV1NamespacedConfigMapOK() *DeleteCoreV1NamespacedConfigMapOK {\n\n\treturn &DeleteCoreV1NamespacedConfigMapOK{}\n}", "func (o *DeleteCoreV1NamespacedPodOK) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodOK {\n\to.Payload = payload\n\treturn o\n}", "func NewDeleteCoreV1NamespacedServiceAccountOK() *DeleteCoreV1NamespacedServiceAccountOK {\n\treturn &DeleteCoreV1NamespacedServiceAccountOK{}\n}", "func NewDelete(reference string) *api.BaseAPI {\n\tdeleteNSGroupStubAPI := api.NewBaseAPI(http.MethodDelete, wapiVersion+\"/\"+reference, nil, new(string))\n\treturn deleteNSGroupStubAPI\n}", "func NewCreateCoreV1NamespacedPodUnauthorized() *CreateCoreV1NamespacedPodUnauthorized {\n\treturn &CreateCoreV1NamespacedPodUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedPodTemplateUnauthorized() *DeleteCoreV1NamespacedPodTemplateUnauthorized {\n\treturn &DeleteCoreV1NamespacedPodTemplateUnauthorized{}\n}", "func NewCreateCoreV1NamespacedPodTemplateOK() *CreateCoreV1NamespacedPodTemplateOK {\n\treturn &CreateCoreV1NamespacedPodTemplateOK{}\n}", "func NewDeleteCoreV1CollectionNamespacedEventOK() *DeleteCoreV1CollectionNamespacedEventOK {\n\n\treturn &DeleteCoreV1CollectionNamespacedEventOK{}\n}", "func NewDeleteCoreV1CollectionNamespacedReplicationControllerOK() 
*DeleteCoreV1CollectionNamespacedReplicationControllerOK {\n\treturn &DeleteCoreV1CollectionNamespacedReplicationControllerOK{}\n}", "func NewReadCoreV1NamespacedPodOK() *ReadCoreV1NamespacedPodOK {\n\treturn &ReadCoreV1NamespacedPodOK{}\n}", "func NewDeleteCoreV1NamespacedPodAccepted() *DeleteCoreV1NamespacedPodAccepted {\n\n\treturn &DeleteCoreV1NamespacedPodAccepted{}\n}", "func NewDeleteCoreV1NamespacedServiceAccountUnauthorized() *DeleteCoreV1NamespacedServiceAccountUnauthorized {\n\treturn &DeleteCoreV1NamespacedServiceAccountUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedServiceUnauthorized() *DeleteCoreV1NamespacedServiceUnauthorized {\n\treturn &DeleteCoreV1NamespacedServiceUnauthorized{}\n}", "func Delete(kind string, name string, namespace string, args ...string) (err error) {\n\tdeleteArgs := []string{\"delete\", kind, name, \"-n\", namespace}\n\t_, err = kubectl(append(deleteArgs, args...)...)\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithPayload adds the payload to the delete core v1 namespaced pod o k response
func (o *DeleteCoreV1NamespacedPodOK) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodOK { o.Payload = payload return o }
[ "func (o *DeleteCoreV1NamespacedPodOK) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedPodAccepted) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedConfigMapOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteCoreV1NamespacedConfigMapOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1CollectionNamespacedEventOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteCoreV1CollectionNamespacedEventOK {\n\to.Payload = payload\n\treturn o\n}", "func (n *Namespaces) DestroyWithLabel(ctx context.Context, ns string, opts DeleteAllOptions) error {\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: opts.LabelSelector,\n\t}\n\n\ttrip, err := world.NewTrip(n.restConfig, &world.Options{\n\t\tNamespace: ns,\n\t\tParallelism: parallelism,\n\t\tList: listOptions,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgroupResources, err := restmapper.GetAPIGroupResources(n.discClient)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trm := restmapper.NewDiscoveryRESTMapper(groupResources)\n\n\t// This is done because the wander function will try to list all k8s resources and most of them cannot be listed by\n\t// Okteto user's service accounts, so it prints tons of warnings in the standard output. 
Setting the logrus level to err avoid those warnings\n\tprevLevel := logrus.GetLevel()\n\tlogrus.SetLevel(logrus.ErrorLevel)\n\tdefer func() {\n\t\tlogrus.SetLevel(prevLevel)\n\t}()\n\n\treturn trip.Wander(ctx, world.TravelerFunc(func(obj runtime.Object) error {\n\t\tm, err := meta.Accessor(obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgvk := obj.GetObjectKind().GroupVersionKind()\n\t\tif !opts.IncludeVolumes && gvk.Kind == volumeKind {\n\t\t\toktetoLog.Debugf(\"skipping deletion of pvc '%s'\", m.GetName())\n\t\t\treturn nil\n\t\t}\n\t\tmapping, err := rm.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdeleteOpts := metav1.DeleteOptions{}\n\n\t\t// It seems that by default, client-go don't delete pods scheduled by jobs, so we need to set the propation policy\n\t\tif gvk.Kind == jobKind {\n\t\t\tdeletePropagation := metav1.DeletePropagationBackground\n\t\t\tdeleteOpts.PropagationPolicy = &deletePropagation\n\t\t}\n\n\t\terr = n.dynClient.\n\t\t\tResource(mapping.Resource).\n\t\t\tNamespace(ns).\n\t\t\tDelete(ctx, m.GetName(), deleteOpts)\n\n\t\tif err != nil {\n\t\t\toktetoLog.Debugf(\"error deleting '%s' '%s': %s\", gvk.Kind, m.GetName(), err)\n\t\t\treturn err\n\t\t}\n\n\t\toktetoLog.Debugf(\"successfully deleted '%s' '%s'\", gvk.Kind, m.GetName())\n\t\treturn nil\n\t}))\n}", "func (o *DeletePostbyIDOK) WithPayload(payload *models.Response) *DeletePostbyIDOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeletePolicyOK) WithPayload(payload *models.Policy) *DeletePolicyOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedPodAccepted) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func Delete(kind string, name string, namespace string, args ...string) (err error) {\n\tdeleteArgs := []string{\"delete\", kind, name, \"-n\", namespace}\n\t_, err = kubectl(append(deleteArgs, args...)...)\n\treturn\n}", "func (o 
*ObjectsReferencesDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteForbidden {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteGroupNamespaceUnauthorized) WithPayload(payload *models.Error) *DeleteGroupNamespaceUnauthorized {\n\to.Payload = payload\n\treturn o\n}", "func (o *ObjectsReferencesDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedConfigMapOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) WithPayload(payload *models.Response) *DeletePostbyIDInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteRuntimeContainerInternalServerError) WithPayload(payload string) *DeleteRuntimeContainerInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *WeaviateSchemaActionsDeleteBadRequest) WithPayload(payload *models.ErrorResponse) *WeaviateSchemaActionsDeleteBadRequest {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteNodeUnauthorized) WithPayload(payload *models.ErrorResponse) *DeleteNodeUnauthorized {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteUserOK) WithPayload(payload *models.DeletedResponse) *DeleteUserOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteAppsV1NamespacedReplicaSetOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteAppsV1NamespacedReplicaSetOK {\n\to.Payload = payload\n\treturn o\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetPayload sets the payload to the delete core v1 namespaced pod o k response
func (o *DeleteCoreV1NamespacedPodOK) SetPayload(payload *models.IoK8sAPICoreV1Pod) { o.Payload = payload }
[ "func (o *DeleteCoreV1NamespacedPodAccepted) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedConfigMapOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1CollectionNamespacedEventOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationOK) SetPayload(payload *models.DeletedResponse) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedPodOK) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteDiscoveryV1beta1NamespacedEndpointSliceOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteAppsV1NamespacedReplicaSetOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *DeleteNodeUnauthorized) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *WeaviateSchemaActionsDeleteBadRequest) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeleteOrgNodeOK) SetPayload(payload *models.Organization) {\n\to.Payload = payload\n}", "func (o *DeleteRuntimeEnvironmentOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *WatchCoreV1NamespacedEndpointsOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1WatchEvent) {\n\to.Payload = payload\n}", "func (o *CreateCoreV1NamespacedPodBindingOK) SetPayload(payload *models.IoK8sAPICoreV1Binding) {\n\to.Payload = 
payload\n}", "func (o *DeleteNodeInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDUnauthorized) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *ObjectsReferencesDeleteForbidden) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewDeleteCoreV1NamespacedPodAccepted creates DeleteCoreV1NamespacedPodAccepted with default headers values
func NewDeleteCoreV1NamespacedPodAccepted() *DeleteCoreV1NamespacedPodAccepted { return &DeleteCoreV1NamespacedPodAccepted{} }
[ "func NewDeleteCoreV1NamespacedPodTemplateAccepted() *DeleteCoreV1NamespacedPodTemplateAccepted {\n\treturn &DeleteCoreV1NamespacedPodTemplateAccepted{}\n}", "func NewCreateCoreV1NamespacedPodAccepted() *CreateCoreV1NamespacedPodAccepted {\n\treturn &CreateCoreV1NamespacedPodAccepted{}\n}", "func NewDeleteCoreV1NamespacedServiceAccepted() *DeleteCoreV1NamespacedServiceAccepted {\n\treturn &DeleteCoreV1NamespacedServiceAccepted{}\n}", "func NewDeleteCoreV1NamespacedEventAccepted() *DeleteCoreV1NamespacedEventAccepted {\n\treturn &DeleteCoreV1NamespacedEventAccepted{}\n}", "func (o *DeleteCoreV1NamespacedPodAccepted) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodAccepted {\n\to.Payload = payload\n\treturn o\n}", "func NewDeleteCoreV1NamespacedServiceAccountAccepted() *DeleteCoreV1NamespacedServiceAccountAccepted {\n\treturn &DeleteCoreV1NamespacedServiceAccountAccepted{}\n}", "func NewDeleteCoreV1NamespacedConfigMapAccepted() *DeleteCoreV1NamespacedConfigMapAccepted {\n\n\treturn &DeleteCoreV1NamespacedConfigMapAccepted{}\n}", "func NewCreateCoreV1NamespacedPodBindingAccepted() *CreateCoreV1NamespacedPodBindingAccepted {\n\n\treturn &CreateCoreV1NamespacedPodBindingAccepted{}\n}", "func NewCreateCoreV1NamespacedPodTemplateAccepted() *CreateCoreV1NamespacedPodTemplateAccepted {\n\treturn &CreateCoreV1NamespacedPodTemplateAccepted{}\n}", "func NewDeleteCoreV1NamespacedLimitRangeAccepted() *DeleteCoreV1NamespacedLimitRangeAccepted {\n\treturn &DeleteCoreV1NamespacedLimitRangeAccepted{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenAccepted() *CreateCoreV1NamespacedServiceAccountTokenAccepted {\n\n\treturn &CreateCoreV1NamespacedServiceAccountTokenAccepted{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenAccepted() *CreateCoreV1NamespacedServiceAccountTokenAccepted {\n\treturn &CreateCoreV1NamespacedServiceAccountTokenAccepted{}\n}", "func NewCreateCoreV1NamespacedConfigMapAccepted() *CreateCoreV1NamespacedConfigMapAccepted 
{\n\n\treturn &CreateCoreV1NamespacedConfigMapAccepted{}\n}", "func NewDeleteCoreV1PersistentVolumeAccepted() *DeleteCoreV1PersistentVolumeAccepted {\n\treturn &DeleteCoreV1PersistentVolumeAccepted{}\n}", "func NewDeleteCoreV1NamespacedPodOK() *DeleteCoreV1NamespacedPodOK {\n\n\treturn &DeleteCoreV1NamespacedPodOK{}\n}", "func NewCreateCoreV1NamespaceAccepted() *CreateCoreV1NamespaceAccepted {\n\treturn &CreateCoreV1NamespaceAccepted{}\n}", "func NewDeleteCertificatesV1CertificateSigningRequestAccepted() *DeleteCertificatesV1CertificateSigningRequestAccepted {\n\n\treturn &DeleteCertificatesV1CertificateSigningRequestAccepted{}\n}", "func NewDeleteAppsV1NamespacedReplicaSetAccepted() *DeleteAppsV1NamespacedReplicaSetAccepted {\n\n\treturn &DeleteAppsV1NamespacedReplicaSetAccepted{}\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteCoreV1NamespacedConfigMapAccepted {\n\to.Payload = payload\n\treturn o\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithPayload adds the payload to the delete core v1 namespaced pod accepted response
func (o *DeleteCoreV1NamespacedPodAccepted) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodAccepted { o.Payload = payload return o }
[ "func (o *DeleteCoreV1NamespacedPodOK) WithPayload(payload *models.IoK8sAPICoreV1Pod) *DeleteCoreV1NamespacedPodOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedPodAccepted) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteCoreV1NamespacedConfigMapAccepted {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedPodOK) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *WeaviateSchemaActionsDeleteBadRequest) WithPayload(payload *models.ErrorResponse) *WeaviateSchemaActionsDeleteBadRequest {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeletePostbyIDOK) WithPayload(payload *models.Response) *DeletePostbyIDOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeletePolicyOK) WithPayload(payload *models.Policy) *DeletePolicyOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ObjectsReferencesDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteForbidden {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedConfigMapOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteCoreV1NamespacedConfigMapOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1CollectionNamespacedEventOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteCoreV1CollectionNamespacedEventOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteUserOK) WithPayload(payload *models.DeletedResponse) *DeleteUserOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteCoreV1NamespacedConfigMapOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o 
*DeleteAttachmentOK) WithPayload(payload models.DeleteAttachmentOKBody) *DeleteAttachmentOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *ObjectsReferencesDeleteInternalServerError) WithPayload(payload *models.ErrorResponse) *ObjectsReferencesDeleteInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *ThingsDeleteForbidden) WithPayload(payload *models.ErrorResponse) *ThingsDeleteForbidden {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteAppsV1NamespacedReplicaSetOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteAppsV1NamespacedReplicaSetOK {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeletePostbyIDInternalServerError) WithPayload(payload *models.Response) *DeletePostbyIDInternalServerError {\n\to.Payload = payload\n\treturn o\n}", "func (o *DeleteDiscoveryV1beta1NamespacedEndpointSliceOK) WithPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) *DeleteDiscoveryV1beta1NamespacedEndpointSliceOK {\n\to.Payload = payload\n\treturn o\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetPayload sets the payload to the delete core v1 namespaced pod accepted response
func (o *DeleteCoreV1NamespacedPodAccepted) SetPayload(payload *models.IoK8sAPICoreV1Pod) { o.Payload = payload }
[ "func (o *DeleteCoreV1NamespacedPodOK) SetPayload(payload *models.IoK8sAPICoreV1Pod) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedConfigMapOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1NamespacedConfigMapAccepted) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteOrganizationOK) SetPayload(payload *models.DeletedResponse) {\n\to.Payload = payload\n}", "func (o *DeleteCoreV1CollectionNamespacedEventOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteAppsV1NamespacedReplicaSetOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *DeleteDiscoveryV1beta1NamespacedEndpointSliceOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o *WeaviateSchemaActionsDeleteBadRequest) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeleteNodeUnauthorized) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDOK) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *ObjectsReferencesDeleteForbidden) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDInternalServerError) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}", "func (o *DeleteNodeInternalServerError) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeleteOrgNodeOK) SetPayload(payload *models.Organization) {\n\to.Payload = payload\n}", "func (o *DeleteRuntimeEnvironmentOK) SetPayload(payload string) {\n\to.Payload = payload\n}", "func (o *DeleteApiextensionsV1CollectionCustomResourceDefinitionOK) SetPayload(payload *models.IoK8sApimachineryPkgApisMetaV1Status) {\n\to.Payload = payload\n}", "func (o 
*ThingsDeleteForbidden) SetPayload(payload *models.ErrorResponse) {\n\to.Payload = payload\n}", "func (o *DeleteUserOK) SetPayload(payload *models.DeletedResponse) {\n\to.Payload = payload\n}", "func (o *DeletePostbyIDUnauthorized) SetPayload(payload *models.Response) {\n\to.Payload = payload\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewDeleteCoreV1NamespacedPodUnauthorized creates DeleteCoreV1NamespacedPodUnauthorized with default headers values
func NewDeleteCoreV1NamespacedPodUnauthorized() *DeleteCoreV1NamespacedPodUnauthorized { return &DeleteCoreV1NamespacedPodUnauthorized{} }
[ "func NewDeleteCoreV1NamespacedPodTemplateUnauthorized() *DeleteCoreV1NamespacedPodTemplateUnauthorized {\n\treturn &DeleteCoreV1NamespacedPodTemplateUnauthorized{}\n}", "func NewCreateCoreV1NamespacedPodUnauthorized() *CreateCoreV1NamespacedPodUnauthorized {\n\treturn &CreateCoreV1NamespacedPodUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedServiceUnauthorized() *DeleteCoreV1NamespacedServiceUnauthorized {\n\treturn &DeleteCoreV1NamespacedServiceUnauthorized{}\n}", "func NewCreateCoreV1NamespacedPodTemplateUnauthorized() *CreateCoreV1NamespacedPodTemplateUnauthorized {\n\treturn &CreateCoreV1NamespacedPodTemplateUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedEventUnauthorized() *DeleteCoreV1NamespacedEventUnauthorized {\n\treturn &DeleteCoreV1NamespacedEventUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedServiceAccountUnauthorized() *DeleteCoreV1NamespacedServiceAccountUnauthorized {\n\treturn &DeleteCoreV1NamespacedServiceAccountUnauthorized{}\n}", "func NewReadCoreV1NamespacedPodUnauthorized() *ReadCoreV1NamespacedPodUnauthorized {\n\treturn &ReadCoreV1NamespacedPodUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedConfigMapUnauthorized() *DeleteCoreV1NamespacedConfigMapUnauthorized {\n\n\treturn &DeleteCoreV1NamespacedConfigMapUnauthorized{}\n}", "func NewCreateCoreV1NamespacedPodBindingUnauthorized() *CreateCoreV1NamespacedPodBindingUnauthorized {\n\n\treturn &CreateCoreV1NamespacedPodBindingUnauthorized{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenUnauthorized() *CreateCoreV1NamespacedServiceAccountTokenUnauthorized {\n\n\treturn &CreateCoreV1NamespacedServiceAccountTokenUnauthorized{}\n}", "func NewCreateCoreV1NamespacedServiceAccountTokenUnauthorized() *CreateCoreV1NamespacedServiceAccountTokenUnauthorized {\n\treturn &CreateCoreV1NamespacedServiceAccountTokenUnauthorized{}\n}", "func NewPatchCoreV1NamespacedServiceAccountUnauthorized() *PatchCoreV1NamespacedServiceAccountUnauthorized {\n\treturn 
&PatchCoreV1NamespacedServiceAccountUnauthorized{}\n}", "func NewWatchCoreV1NamespacedPodTemplateListUnauthorized() *WatchCoreV1NamespacedPodTemplateListUnauthorized {\n\treturn &WatchCoreV1NamespacedPodTemplateListUnauthorized{}\n}", "func NewWatchCoreV1NamespacedEndpointsUnauthorized() *WatchCoreV1NamespacedEndpointsUnauthorized {\n\n\treturn &WatchCoreV1NamespacedEndpointsUnauthorized{}\n}", "func NewDeleteCoreV1CollectionNamespacedReplicationControllerUnauthorized() *DeleteCoreV1CollectionNamespacedReplicationControllerUnauthorized {\n\treturn &DeleteCoreV1CollectionNamespacedReplicationControllerUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedLimitRangeUnauthorized() *DeleteCoreV1NamespacedLimitRangeUnauthorized {\n\treturn &DeleteCoreV1NamespacedLimitRangeUnauthorized{}\n}", "func NewDeleteCoreV1CollectionNamespacedEventUnauthorized() *DeleteCoreV1CollectionNamespacedEventUnauthorized {\n\n\treturn &DeleteCoreV1CollectionNamespacedEventUnauthorized{}\n}", "func NewDeleteCoreV1NamespacedPodOK() *DeleteCoreV1NamespacedPodOK {\n\n\treturn &DeleteCoreV1NamespacedPodOK{}\n}", "func NewCreateCoreV1NamespacedConfigMapUnauthorized() *CreateCoreV1NamespacedConfigMapUnauthorized {\n\n\treturn &CreateCoreV1NamespacedConfigMapUnauthorized{}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ID returns the unique name of the driver.
func (*Driver) ID() string { return DriverID }
[ "func (d *Driver) Name() string { return DriverName }", "func ID() (string, error) {\n\tid, err := hardwareId()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"hardwareid: %v\", err)\n\t}\n\treturn id, nil\n}", "func (v VirtualSwitch) ID() (string, error) {\n\tid, err := v.virtualSwitch.GetProperty(\"Name\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"GetProperty(Name)\")\n\t}\n\treturn id.Value().(string), nil\n}", "func (d *Driver) DriverName() string {\n\treturn DriverName\n}", "func (d *Driver) DriverName() string {\n\treturn driverName\n}", "func DriverName() string {\n\treturn driverName\n}", "func (db *DB) DriverName() string {\n return db.driverName\n}", "func DriverName() string {\n\tdriver, err := DriverNameByKey(DefaultConnectionName)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn driver\n}", "func ID() string {\n\treturn uuid.New().String()\n}", "func (s *Server) ID() string {\n\treturn s.Config().GetUuid()\n}", "func DriverName(c *cli.Context) string {\n\treturn c.GlobalString(DriverFlag)\n}", "func (b *impl) ID() string {\n\treturn b.id\n}", "func ID() (string, error) {\n\treturn GetHex(64)\n}", "func (u Unit) ID() string {\n\treturn u.String()\n}", "func (dev *SDRDevice) GetDriverKey() (driverKey string) {\n\n\tval := (*C.char)(C.SoapySDRDevice_getDriverKey(dev.device))\n\tdefer C.free(unsafe.Pointer(val))\n\n\treturn C.GoString(val)\n}", "func (h *Host) ID() string {\n\tif h.id == \"\" {\n\t\thash := md5.New()\n\t\t_, _ = io.WriteString(hash, h.IP+h.MAC)\n\t\th.id = fmt.Sprintf(\"%x\", hash.Sum(nil))\n\t}\n\n\treturn h.id\n}", "func (db *DB) DriverName() string {\n\treturn db.DB.DriverName()\n}", "func (c *Client) ID() string {\n\treturn c.uid\n}", "func (d *DropletBackend) ID() string {\n\treturn strconv.Itoa(d.Droplet.ID)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LoadAll loads all indexes for given db and table
func (d *Driver) LoadAll(db, table string) ([]sql.Index, error) { var ( indexes []sql.Index errors []string root = filepath.Join(d.root, db, table) ) dirs, err := ioutil.ReadDir(root) if err != nil { if os.IsNotExist(err) { return indexes, nil } return nil, err } for _, info := range dirs { if info.IsDir() && !strings.HasPrefix(info.Name(), ".") { idx, err := d.loadIndex(db, table, info.Name()) if err != nil { if !errCorruptedIndex.Is(err) { errors = append(errors, err.Error()) } continue } indexes = append(indexes, idx) } } if len(errors) > 0 { return nil, fmt.Errorf(strings.Join(errors, "\n")) } return indexes, nil }
[ "func (d *Driver) LoadAll(db, table string) ([]sql.Index, error) {\n\tvar (\n\t\tindexes []sql.Index\n\t\terrors []string\n\t\troot = filepath.Join(d.root, db, table)\n\t)\n\n\tdirs, err := ioutil.ReadDir(root)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn indexes, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tfor _, info := range dirs {\n\t\tif info.IsDir() && !strings.HasPrefix(info.Name(), \".\") {\n\t\t\tidx, err := d.loadIndex(db, table, info.Name())\n\t\t\tif err != nil {\n\t\t\t\tif !errCorruptedIndex.Is(err) {\n\t\t\t\t\terrors = append(errors, err.Error())\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tindexes = append(indexes, idx)\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn nil, fmt.Errorf(strings.Join(errors, \"\\n\"))\n\t}\n\n\treturn indexes, nil\n}", "func loadIndexs() {\n\tdb := open()\n\tindexs = make(map[string][]*Index)\n\tdb.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(INDEX_BUCKET))\n\t\tif b == nil {\n\t\t\tlogger.Infof(\"bucket[%s] not exist\", INDEX_BUCKET)\n\t\t\treturn nil\n\t\t}\n\t\tc := b.Cursor()\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\tkey := string(k)\n\t\t\tvar _indexs []string\n\t\t\terr := json.Unmarshal(v, &_indexs)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"parse index[%s] error -> %v\", k, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t__indexs := make([]*Index, len(_indexs)) \n\t\t\t// parse index\n\t\t\tfor i, _index := range _indexs {\n\t\t\t\tsps :=strings.Split(_index, INDEX_SPLIT) \n\t\t\t\tindex := &Index {\n\t\t\t\t\tbucket: key,\n\t\t\t\t\tindexs: sps,\n\t\t\t\t}\n\t\t\t\t__indexs[i] = index\n\t\t\t}\n\t\t\tindexs[key] = __indexs\n\t\t}\n\t\treturn nil\n\t})\n}", "func (tl *TypeLoader) LoadTableIndexes(typeTpl *models.Type, ixMap map[string]*models.Index) error {\n\tvar err error\n\tvar priIxLoaded bool\n\n\t// load indexes\n\tindexList, err := tl.source.IndexList(typeTpl.TableName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// process indexes\n\tfor _, ix := range 
indexList {\n\t\t// save whether or not the primary key index was processed\n\t\tpriIxLoaded = priIxLoaded || ix.IsPrimary\n\n\t\t// create index template\n\t\tixTpl := &models.Index{\n\t\t\tName: internal.SnakeToCamel(ix.IndexName),\n\t\t\tType: typeTpl,\n\t\t\tFields: []*models.Field{},\n\t\t\tIndexName: ix.IndexName,\n\t\t\tIsUnique: ix.IsUnique,\n\t\t\tIsPrimary: ix.IsPrimary,\n\t\t}\n\n\t\t// load index columns\n\t\terr = tl.LoadIndexColumns(ixTpl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// build func name\n\t\tixTpl.FuncName = tl.buildIndexFuncName(ixTpl)\n\t\tixTpl.LegacyFuncName = tl.buildLegacyIndexFuncName(ixTpl)\n\n\t\tixMap[typeTpl.TableName+\"_\"+ix.IndexName] = ixTpl\n\t}\n\n\treturn nil\n}", "func (tl *TypeLoader) LoadIndexes(tableMap map[string]*models.Type) (map[string]*models.Index, error) {\n\tvar err error\n\n\tixMap := map[string]*models.Index{}\n\tfor _, t := range tableMap {\n\t\t// load table indexes\n\t\terr = tl.LoadTableIndexes(t, ixMap)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn ixMap, nil\n}", "func TestSQLSmith_LoadIndexes(t *testing.T) {\n\te := Executor{\n\t\tconn: nil,\n\t\tdb: dbname,\n\t\ttables: make(map[string]*types.Table),\n\t}\n\tindexes[\"users\"] = []types.CIStr{\"idx1\", \"idx2\"}\n\te.loadSchema(schema, indexes)\n\n\tassert.Equal(t, len(e.tables), 6)\n\tassert.Equal(t, len(e.tables[\"users\"].Indexes), 2)\n}", "func LoadIndexer(filename string) (*Indexer, error) {\n\tvar indexer Indexer\n\tvar err error\n\tindexer.wordInverted = make(map[uint64]map[uint64][]int)\n\tindexer.titleInverted = make(map[uint64]map[uint64][]int)\n\tindexer.db, err = bolt.Open(filename, 0666, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Ensure that all buckets exist\n\tindexer.db.Update(func(tx *bolt.Tx) error {\n\t\tfor i := 0; i < NumTable; i++ {\n\t\t\ttx.CreateBucketIfNotExists(intToByte(i))\n\t\t}\n\t\treturn nil\n\t})\n\treturn &indexer, nil\n}", "func ReloadAllTableInfo(ctx 
context.Context, filePath string) error {\n\tconf, err := getConfig(ctx, filePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, db := range conf.Databases {\n\t\tfor _, h := range conf.Hosts {\n\t\t\tif db.HostKey != h.Key {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\terr := openAdditional(ctx, h.User, h.Password, h.Address, db.Name, h.Port, h.Protocol)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttables, err := showtables(db.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tdb.Tables = tables\n\t\t}\n\t}\n\n\treturn setConfig(ctx, conf, filePath)\n}", "func (db *DB) LoadAll(ctx context.Context, ids ...string) ([]*document.Document, error) {\n\tvar docs []*document.Document\n\n\treturn docs, db.Read(\n\t\tctx,\n\t\tFetchAll(\n\t\t\tfunc(d *document.Document) (bool, error) {\n\t\t\t\tdocs = append(docs, d)\n\t\t\t\treturn true, nil\n\t\t\t},\n\t\t),\n\t)\n}", "func LoadAll(args ...interface{}) error {\n\treturn doAll(Load, args...)\n}", "func (DBM *DBManager) loadAllDatabases(path string) (err error) {\n\tdatabasesFiles, err := listDatabases(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, databaseFile := range databasesFiles {\n\t\tdb, err := data.LoadDatabase(filepath.Join(path, databaseFile.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tDBM.databases.Store(databaseFile.Name(), db)\n\t}\n\treturn nil\n}", "func LoadAll(m Model, query M, into interface{}) error {\n\ts := session()\n\tdefer s.Close()\n\n\treturn c(s, m).Find(query).All(into)\n}", "func (s *Store) loadAll() error {\n\tfiles, err := s.ListFiles(s.Dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to list %s: %v\", s.Dir, err)\n\t}\n\n\tfor _, file := range files {\n\t\tfilepath := path.Join(s.Dir, file)\n\t\terr := s.loadPath(filepath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to load %s: %v\", filepath, err)\n\t\t}\n\t}\n\treturn nil\n}", "func LoadAll(ctx context.Context, db gorp.SqlExecutor, opts ...LoadOptionFunc) (sdk.Groups, 
error) {\n\tquery := gorpmapping.NewQuery(`\n SELECT *\n FROM \"group\"\n ORDER BY \"group\".name\n `)\n\treturn getAll(ctx, db, query, opts...)\n}", "func OrTableIndexes(db models.XODB, schema string, table string) ([]*models.Index, error) {\n\tvar err error\n\n\t// sql query\n\tconst sqlstr = `SELECT ` +\n\t\t`LOWER(i.index_name) AS index_name, ` +\n\t\t`CASE WHEN i.uniqueness = 'UNIQUE' THEN '1' ELSE '0' END AS is_unique, ` +\n\t\t`CASE WHEN c.constraint_type = 'P' THEN '1' ELSE '0' END AS is_primary ` +\n\t\t`FROM user_indexes i ` +\n\t\t`LEFT JOIN user_constraints c on i.INDEX_NAME = c.constraint_name ` +\n\t\t`WHERE i.TABLE_OWNER = UPPER(:1) AND i.TABLE_NAME = :2`\n\n\t// run query\n\tmodels.XOLog(sqlstr, schema, table)\n\tq, err := db.Query(sqlstr, schema, table)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer q.Close()\n\n\t// load results\n\tres := []*models.Index{}\n\tfor q.Next() {\n\t\ti := models.Index{}\n\n\t\t// scan\n\t\terr = q.Scan(&i.IndexName, &i.IsUnique, &i.IsPrimary)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres = append(res, &i)\n\t}\n\n\treturn res, nil\n}", "func (mi *MasterIndex) All() []*Index {\n\tmi.idxMutex.Lock()\n\tdefer mi.idxMutex.Unlock()\n\n\treturn mi.idx\n}", "func (e *engine) load() error {\n\tdatabaseNames, err := listDir(e.cfg.Dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, databaseName := range databaseNames {\n\t\t_, err := e.CreateDatabase(databaseName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func LoadAll(db gorp.SqlExecutor) ([]sdk.Model, error) {\n\tquery := fmt.Sprintf(`\n SELECT %s\n FROM worker_model\n JOIN \"group\" ON worker_model.group_id = \"group\".id\n ORDER BY worker_model.name\n `, modelColumns)\n\treturn loadAll(db, false, query)\n}", "func ForAllIndexes(ctx context.Context, repo restic.Repository,\n\tfn func(id restic.ID, index *Index, oldFormat bool, err error) error) error {\n\n\tdebug.Log(\"Start\")\n\n\ttype FileInfo struct 
{\n\t\trestic.ID\n\t\tSize int64\n\t}\n\n\tvar m sync.Mutex\n\n\t// track spawned goroutines using wg, create a new context which is\n\t// cancelled as soon as an error occurs.\n\twg, ctx := errgroup.WithContext(ctx)\n\n\tch := make(chan FileInfo)\n\t// send list of index files through ch, which is closed afterwards\n\twg.Go(func() error {\n\t\tdefer close(ch)\n\t\treturn repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil\n\t\t\tcase ch <- FileInfo{id, size}:\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t})\n\n\t// a worker receives an index ID from ch, loads the index, and sends it to indexCh\n\tworker := func() error {\n\t\tvar buf []byte\n\t\tfor fi := range ch {\n\t\t\tdebug.Log(\"worker got file %v\", fi.ID.Str())\n\t\t\tvar err error\n\t\t\tvar idx *Index\n\t\t\toldFormat := false\n\n\t\t\tbuf, err = repo.LoadAndDecrypt(ctx, buf[:0], restic.IndexFile, fi.ID)\n\t\t\tif err == nil {\n\t\t\t\tidx, oldFormat, err = DecodeIndex(buf, fi.ID)\n\t\t\t}\n\n\t\t\tm.Lock()\n\t\t\terr = fn(fi.ID, idx, oldFormat, err)\n\t\t\tm.Unlock()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// run workers on ch\n\twg.Go(func() error {\n\t\treturn RunWorkers(loadIndexParallelism, worker)\n\t})\n\n\treturn wg.Wait()\n}", "func LoadAll(ctx context.Context, db gorp.SqlExecutor, opts ...LoadOptionFunc) ([]sdk.WorkflowTemplate, error) {\n\tquery := gorpmapping.NewQuery(\"SELECT * FROM workflow_template\")\n\treturn getAll(ctx, db, query, opts...)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the given index for all partitions in the iterator.
func (d *Driver) Delete(i sql.Index, partitions sql.PartitionIter) error { idx, ok := i.(*pilosaIndex) if !ok { return errInvalidIndexType.New(i) } if idx.cancel != nil { idx.cancel() idx.wg.Wait() } if err := idx.index.Open(); err != nil { return err } defer idx.index.Close() if err := os.RemoveAll(filepath.Join(d.root, i.Database(), i.Table(), i.ID())); err != nil { return err } for { p, err := partitions.Next() if err != nil { if err == io.EOF { break } return err } for _, ex := range idx.Expressions() { name := fieldName(idx.ID(), ex, p) field := idx.index.Field(name) if field == nil { continue } if err = idx.index.DeleteField(name); err != nil { return err } } } return partitions.Close() }
[ "func deleteAllInt(index int, testMode bool) error {\n\tallPaths, errPaths := getPaths()\n\tif errPaths != nil {\n\t\tcolor.Red(\":: Error while reading .tempestcf\")\n\t\treturn errPaths\n\t}\n\tif index >= 0 && index < len(allPaths) {\n\t\tfor indx, indPath := range allPaths {\n\t\t\tif index == indx {\n\t\t\t\t// color.Cyan(indPath)\n\t\t\t\tfInt, fInfo, eInt := fetchAll(indPath)\n\t\t\t\tif eInt != nil {\n\t\t\t\t\tfmt.Println(\"-----\", eInt)\n\t\t\t\t}\n\t\t\t\tif fInfo != nil {\n\t\t\t\t\treturn emptyFile(indPath, fInfo, testMode)\n\t\t\t\t}\n\t\t\t\treturn deleteAllStr(indPath, fInt, testMode)\n\t\t\t}\n\t\t}\n\t}\n\treturn errors.New(\"Nothing to purge\")\n}", "func (t *AutoTraceChaincode) deleteIndex(stub shim.ChaincodeStubInterface, indexName string, attributes []string) error {\n\tfmt.Println(\"- start delete index\")\n\tvar err error\n\t// ==== Index the object to enable range queries, e.g. return all parts made by supplier b ====\n\t// An 'index' is a normal key/value entry in state.\n\t// The key is a composite key, with the elements that you want to range query on listed first.\n\t// This will enable very efficient state range queries based on composite keys matching indexName~color~*\n\tindexKey, err := stub.CreateCompositeKey(indexName, attributes)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Delete index by key\n\tstub.DelState(indexKey)\n\n\tfmt.Println(\"- end delete index\")\n\treturn nil\n}", "func (c *Collection) indexDelete(tx ds.Txn, key ds.Key, originalData []byte) error {\n\tfor path, index := range c.indexes {\n\t\terr := c.indexUpdate(path, index, tx, key, originalData, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (c *Collection) DeleteIndex(name string) {\n\tvar index *BleveIndex\n\tfor i, tmpIndex := range c.bleveIndexes {\n\t\tif tmpIndex.name == name {\n\t\t\tindex = tmpIndex\n\n\t\t\tcopy(c.bleveIndexes[i:], c.bleveIndexes[i+1:])\n\t\t\tc.bleveIndexes[len(c.bleveIndexes)-1] = nil // or the 
zero value of T\n\t\t\tc.bleveIndexes = c.bleveIndexes[:len(c.bleveIndexes)-1]\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tindex.close()\n\tindex.delete()\n\n\tc.db.badger.DropPrefix(index.prefix)\n}", "func SliceDeleteAtIndex(sl *[]Ki, idx int) error {\n\tif err := SliceIsValidIndex(sl, idx); err != nil {\n\t\treturn err\n\t}\n\t// this copy makes sure there are no memory leaks\n\tsz := len(*sl)\n\tcopy((*sl)[idx:], (*sl)[idx+1:])\n\t(*sl)[sz-1] = nil\n\t(*sl) = (*sl)[:sz-1]\n\treturn nil\n}", "func (c *index) Drop(rm kv.RetrieverMutator) error {\n\tit, err := rm.Iter(c.prefix, c.prefix.PrefixNext())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer it.Close()\n\n\t// remove all indices\n\tfor it.Valid() {\n\t\tif !it.Key().HasPrefix(c.prefix) {\n\t\t\tbreak\n\t\t}\n\t\terr := rm.Delete(it.Key())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = it.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func deleteIndex(stub shim.ChaincodeStubInterface, consent consent) error {\n\tlogger.Debug(\"deleteIndex(consentID=\"+ consent.ConsentID+\") : calling method -\")\n\tAppIndex, err := stub.CreateCompositeKey(indexApp, []string{consent.AppID, consent.ConsentID})\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\tstub.DelState(AppIndex)\n\n\tOwnerIndex, err := stub.CreateCompositeKey(indexOwner, []string{consent.AppID, consent.OwnerID,\n\t\tconsent.ConsentID})\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\tstub.DelState(OwnerIndex)\n\n\tConsumerIndex, err := stub.CreateCompositeKey(indexConsumer, []string{consent.AppID, consent.ConsumerID,\n\t\tconsent.ConsentID})\n\tif err != nil {\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\tstub.DelState(ConsumerIndex)\n\n\tIsConsentIndex, err := stub.CreateCompositeKey(indexIsConsent, []string{consent.AppID, consent.OwnerID,\n\t\tconsent.ConsumerID, consent.DataType, consent.DataAccess, consent.ConsentID})\n\tif err != nil 
{\n\t\tlogger.Error(err.Error())\n\t\treturn err\n\t}\n\tstub.DelState(IsConsentIndex)\n\treturn nil\n}", "func (this *MyLinkedList) DeleteAtIndex(index int) {\n\tlinkLen := this.CountLinkLen()\n\tif linkLen -1 <= index {\n\t\treturn\n\t}\n\n\tnowIndex := 0\n\ttemp := this.Head\n\tfor {\n\t\tif nowIndex == index {\n\t\t\tif temp.Next.Next == nil {\n\t\t\t\ttemp.Next = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemp.Next.Next.Pre = temp\n\t\t\ttemp.Next = temp.Next.Next\n\t\t\tbreak\n\t\t}\n\t\ttemp = temp.Next\n\t\tnowIndex += 1\n\t}\n}", "func (p *IntArray) Delete_at(del_index int) {\n\ttmp := *p\n\tvar new_array IntArray\n\tfor i := 0; i < len(tmp); i++ {\n\t\tif i != del_index {\n\t\t\tnew_array = append(new_array, tmp[i])\n\t\t}\n\t}\n\t*p = new_array\n}", "func (b *Bag) DeleteAt(index int) {\n\tb.items[index] = b.items[len(b.items)-1]\n\tb.items = b.items[:len(b.items)-1]\n}", "func (s *Service) DeleteIdx(c context.Context, nwMsg []byte) (err error) {\n\tvar opinion *model.Opinion\n\tif err = json.Unmarshal(nwMsg, &opinion); err != nil {\n\t\tlog.Error(\"json.Unmarshal(%s) error(%v)\", string(nwMsg), err)\n\t\treturn\n\t}\n\ts.dao.DelOpinionCache(c, opinion.Vid)\n\ts.dao.DelCaseIdx(c, opinion.Cid)\n\ts.dao.DelVoteIdx(c, opinion.Cid)\n\treturn\n}", "func (this *MyLinkedList) DeleteAtIndex(index int) {\n\tcurr := this.head\n\n\tif index >= this.size {\n\t\treturn\n\t} else if this.size == 1 {\n\t\tthis.head.val = 0\n\t\tthis.size--\n\t\treturn\n\t} else if index == 0 {\n\t\tthis.head = curr.next\n\t}\n\n\tfor ; index > 1; index-- {\n\t\tcurr = curr.next\n\t}\n\tcurr.next = curr.next.next\n\tthis.size--\n}", "func (c *OrderedMap) DeleteIndex(index int) (string, interface{}) {\n\tkey := c.Keys[index]\n\tvalue := c.Map[key]\n\tdelete(c.Map, key)\n\tc.Keys = append(c.Keys[:index], c.Keys[index+1:]...)\n\treturn key, value\n}", "func (this *MyLinkedList) DeleteAtIndex(index int) {\n\n\tif index == 0 {\n\t\tif this.next != nil {\n\t\t\tthis.val = 
this.next.val\n\t\t\tthis.next = this.next.next\n\t\t} else {\n\t\t\tthis.val = -1\n\t\t\tthis.next = nil\n\t\t\tthis.use = false\n\t\t}\n\t\treturn\n\t}\n\ti := 1\n\tnode := this\n\tfor node.next != nil {\n\t\tif i == index {\n\t\t\tnode.next = node.next.next\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t\tnode = node.next\n\t}\n}", "func (h *binaryHeap) removeIdx(idx int) {\n\tif h.invalidNode(idx) {\n\t\treturn\n\t}\n\th.swapIdx(idx, h.len)\n\th.tree[h.len] = 0\n\th.len--\n\th.bubbleDown(idx)\n}", "func (c *index) Delete(sc *stmtctx.StatementContext, m kv.Mutator, indexedValues []types.Datum, h int64, ss kv.Transaction) error {\n\tkey, _, err := c.GenIndexKey(sc, indexedValues, h, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = m.Delete(key)\n\tif ss != nil {\n\t\tswitch c.idxInfo.State {\n\t\tcase model.StatePublic:\n\t\t\t// If the index is in public state, delete this index means it must exists.\n\t\t\tss.SetAssertion(key, kv.Exist)\n\t\tdefault:\n\t\t\tss.SetAssertion(key, kv.None)\n\t\t}\n\t}\n\treturn err\n}", "func DeleteAt[S ~[]T, T any](items S, indices ...int) (S, error) {\n\tif len(indices) == 0 || len(items) == 0 {\n\t\treturn items, nil\n\t}\n\n\tfor _, index := range indices {\n\t\tif index >= len(items) {\n\t\t\treturn items, ErrOutOfRange\n\t\t}\n\t}\n\n\tresult := make([]T, 0, len(items)-1)\n\tfor i, el := range items {\n\t\tadd := true\n\t\tfor _, index := range indices {\n\t\t\tif i == index {\n\t\t\t\tadd = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif add {\n\t\t\tresult = append(result, el)\n\t\t}\n\t}\n\treturn result, nil\n}", "func deleteIndexOp(bucket *bolt.Bucket, k []byte) error {\n\treturn bucket.Delete(k)\n}", "func (a *Array) Delete(index uint) (int, error) {\n\tif a.isIndexOutOfRange(index) {\n\t\treturn 0, errors.New(\"out of index range\")\n\t}\n\tv := a.data[index]\n\tfor i := index; i < a.Len()-1; i++ {\n\t\ta.data[i] = a.data[i+1]\n\t}\n\ta.length--\n\treturn v, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
dedupLiveResources handles removes live resource duplicates with the same UID. Duplicates are created in a separate resource groups. E.g. apps/Deployment produces duplicate in extensions/Deployment, authorization.openshift.io/ClusterRole produces duplicate in rbac.authorization.k8s.io/ClusterRole etc. The method removes such duplicates unless it was defined in git ( exists in target resources list ). At least one duplicate stays. If non of duplicates are in git at random one stays
func dedupLiveResources(targetObjs []*unstructured.Unstructured, liveObjsByKey map[kubeutil.ResourceKey]*unstructured.Unstructured) { targetObjByKey := make(map[kubeutil.ResourceKey]*unstructured.Unstructured) for i := range targetObjs { targetObjByKey[kubeutil.GetResourceKey(targetObjs[i])] = targetObjs[i] } liveObjsById := make(map[types.UID][]*unstructured.Unstructured) for k := range liveObjsByKey { obj := liveObjsByKey[k] if obj != nil { liveObjsById[obj.GetUID()] = append(liveObjsById[obj.GetUID()], obj) } } for id := range liveObjsById { objs := liveObjsById[id] if len(objs) > 1 { duplicatesLeft := len(objs) for i := range objs { obj := objs[i] resourceKey := kubeutil.GetResourceKey(obj) if _, ok := targetObjByKey[resourceKey]; !ok { delete(liveObjsByKey, resourceKey) duplicatesLeft-- if duplicatesLeft == 1 { break } } } } } }
[ "func (r *ContainerizedWorkloadReconciler) cleanupResources(ctx context.Context,\n\tworkload *oamv1alpha2.ContainerizedWorkload, deployUID, serviceUID *types.UID) error {\n\tlog := r.Log.WithValues(\"gc deployment\", workload.Name)\n\tvar deploy appsv1.Deployment\n\tvar service corev1.Service\n\tfor _, res := range workload.Status.Resources {\n\t\tuid := res.UID\n\t\tif res.Kind == KindDeployment {\n\t\t\tif uid != *deployUID {\n\t\t\t\tlog.Info(\"Found an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t\tdn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, dn, &deploy); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &deploy); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t}\n\t\t} else if res.Kind == KindService {\n\t\t\tif uid != *serviceUID {\n\t\t\t\tlog.Info(\"Found an orphaned service\", \"orphaned UID\", uid)\n\t\t\t\tsn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, sn, &service); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &service); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned service\", \"orphaned UID\", uid)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func removeDuplicateResources(resources []puppetquery.Resource) []puppetquery.Resource {\n\t// no duplicates possible\n\tif len(resources) < 2 {\n\t\treturn resources\n\t}\n\n\tseen := make(map[string]bool, len(resources))\n\tuniq := make([]puppetquery.Resource, 0, len(resources))\n\n\tseen[resources[0].Title] = true\n\tuniq = append(uniq, resources[0])\n\tfor _, r := range resources[1:] {\n\t\tif 
seen[r.Title] {\n\t\t\tcontinue\n\t\t}\n\t\tseen[r.Title] = true\n\t\tuniq = append(uniq, r)\n\t}\n\treturn uniq\n}", "func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {\n\tci := fs.GetConfig(ctx)\n\n\t// Make map of IDs\n\tIDs := make(map[string]int, len(objs))\n\tfor _, o := range objs {\n\t\tif do, ok := o.(fs.IDer); ok {\n\t\t\tif ID := do.ID(); ID != \"\" {\n\t\t\t\tIDs[ID]++\n\t\t\t}\n\t\t}\n\t}\n\n\t// Remove duplicate IDs\n\tnewObjs := objs[:0]\n\tfor _, o := range objs {\n\t\tif do, ok := o.(fs.IDer); ok {\n\t\t\tif ID := do.ID(); ID != \"\" {\n\t\t\t\tif IDs[ID] <= 1 {\n\t\t\t\t\tnewObjs = append(newObjs, o)\n\t\t\t\t} else {\n\t\t\t\t\tfs.Logf(o, \"Ignoring as it appears %d times in the listing and deleting would lead to data loss\", IDs[ID])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tobjs = newObjs\n\n\t// See how many of these duplicates are identical\n\tdupesByID := make(map[string][]fs.Object, len(objs))\n\tfor _, o := range objs {\n\t\tID := \"\"\n\t\tif ci.SizeOnly && o.Size() >= 0 {\n\t\t\tID = fmt.Sprintf(\"size %d\", o.Size())\n\t\t} else if ht != hash.None {\n\t\t\thashValue, err := o.Hash(ctx, ht)\n\t\t\tif err == nil && hashValue != \"\" {\n\t\t\t\tID = fmt.Sprintf(\"%v %s\", ht, hashValue)\n\t\t\t}\n\t\t}\n\t\tif ID == \"\" {\n\t\t\tremainingObjs = append(remainingObjs, o)\n\t\t} else {\n\t\t\tdupesByID[ID] = append(dupesByID[ID], o)\n\t\t}\n\t}\n\n\t// Delete identical duplicates, filling remainingObjs with the ones remaining\n\tfor ID, dupes := range dupesByID {\n\t\tremainingObjs = append(remainingObjs, dupes[0])\n\t\tif len(dupes) > 1 {\n\t\t\tfs.Logf(remote, \"Deleting %d/%d identical duplicates (%s)\", len(dupes)-1, len(dupes), ID)\n\t\t\tfor _, o := range dupes[1:] {\n\t\t\t\terr := DeleteFile(ctx, o)\n\t\t\t\tif err != nil {\n\t\t\t\t\tremainingObjs = append(remainingObjs, o)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn remainingObjs\n}", "func (r *FooReconciler) 
cleanupOwnedResources(ctx context.Context, log logr.Logger, foo *batchv1.Foo) error {\n\tlog.Info(\"finding existing Deployments for MyKind resource\")\n\n\t// List all deployment resources owned by this MyKind\n\tvar deployments apps.DeploymentList\n\t//if err := r.List(ctx, &deployments, client.InNamespace(foo.Namespace), client.MatchingField(deploymentOwnerKey, foo.Name)); err != nil {\n\t//\treturn err\n\t//}\n\n\tdeleted := 0\n\tfor _, depl := range deployments.Items {\n\t\tif depl.Name == foo.Spec.Name {\n\t\t\t// If this deployment's name matches the one on the MyKind resource\n\t\t\t// then do not delete it.\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := r.Client.Delete(ctx, &depl); err != nil {\n\t\t\tlog.Error(err, \"failed to delete Deployment resource\")\n\t\t\treturn err\n\t\t}\n\n\t\tr.Recorder.Eventf(foo, core.EventTypeNormal, \"Deleted\", \"Deleted deployment %q\", depl.Name)\n\t\tdeleted++\n\t}\n\n\tlog.Info(\"finished cleaning up old Deployment resources\", \"number_deleted\", deleted)\n\n\treturn nil\n}", "func dedupeInteractive(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) bool {\n\tdedupeList(ctx, f, ht, remote, objs, byHash)\n\tcommands := []string{\"sSkip and do nothing\", \"kKeep just one (choose which in next step)\"}\n\tif !byHash {\n\t\tcommands = append(commands, \"rRename all to be different (by changing file.jpg to file-1.jpg)\")\n\t}\n\tcommands = append(commands, \"qQuit\")\n\tswitch config.Command(commands) {\n\tcase 's':\n\tcase 'k':\n\t\tkeep := config.ChooseNumber(\"Enter the number of the file to keep\", 1, len(objs))\n\t\tdedupeDeleteAllButOne(ctx, keep-1, remote, objs)\n\tcase 'r':\n\t\tdedupeRename(ctx, f, remote, objs)\n\tcase 'q':\n\t\treturn false\n\t}\n\treturn true\n}", "func uniqResources(resources []metav1.APIResource) []metav1.APIResource {\n\tseen := make(map[string]struct{}, len(resources))\n\ti := 0\n\tfor _, k := range resources {\n\t\tif _, ok := seen[k.Name]; ok 
{\n\t\t\tcontinue\n\t\t}\n\t\tseen[k.Name] = struct{}{}\n\t\tresources[i] = k\n\n\t\ti++\n\t}\n\treturn resources[:i]\n}", "func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []fs.Object) {\n\tcount := 0\n\tfor i, o := range objs {\n\t\tif i == keep {\n\t\t\tcontinue\n\t\t}\n\t\terr := DeleteFile(ctx, o)\n\t\tif err == nil {\n\t\t\tcount++\n\t\t}\n\t}\n\tif count > 0 {\n\t\tfs.Logf(remote, \"Deleted %d extra copies\", count)\n\t}\n}", "func dedupResourceInfos(infos []ResourceInfo) []ResourceInfo {\n\tvar ret []ResourceInfo\n\tseen := make(map[schema.GroupResource]struct{})\n\tfor _, info := range infos {\n\t\tgr := info.GroupResource\n\t\tif _, ok := seen[gr]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tgvrs := info.EquivalentResourceMapper.EquivalentResourcesFor(gr.WithVersion(\"\"), \"\")\n\t\tfor _, gvr := range gvrs {\n\t\t\tseen[gvr.GroupResource()] = struct{}{}\n\t\t}\n\t\tret = append(ret, info)\n\t}\n\treturn ret\n}", "func TestListResources_DuplicateResourceFilterByLabel(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\n\tbackend, err := lite.NewWithConfig(ctx, lite.Config{\n\t\tPath: t.TempDir(),\n\t\tClock: clockwork.NewFakeClock(),\n\t})\n\trequire.NoError(t, err)\n\n\tpresence := NewPresenceService(backend)\n\n\t// Same resource name, but have different labels.\n\tnames := []string{\"a\", \"a\", \"a\", \"a\"}\n\tlabels := []map[string]string{\n\t\t{\"env\": \"prod\"},\n\t\t{\"env\": \"dev\"},\n\t\t{\"env\": \"qa\"},\n\t\t{\"env\": \"dev\"},\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tkind string\n\t\tinsertResources func()\n\t\twantNames []string\n\t}{\n\t\t{\n\t\t\tname: \"KindDatabaseServer\",\n\t\t\tkind: types.KindDatabaseServer,\n\t\t\tinsertResources: func() {\n\t\t\t\tfor i := 0; i < len(names); i++ {\n\t\t\t\t\tdb, err := types.NewDatabaseServerV3(types.Metadata{\n\t\t\t\t\t\tName: fmt.Sprintf(\"name-%v\", i),\n\t\t\t\t\t}, types.DatabaseServerSpecV3{\n\t\t\t\t\t\tHostID: \"_\",\n\t\t\t\t\t\tHostname: 
\"_\",\n\t\t\t\t\t\tDatabase: &types.DatabaseV3{\n\t\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\t\tName: names[i],\n\t\t\t\t\t\t\t\tLabels: labels[i],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: types.DatabaseSpecV3{\n\t\t\t\t\t\t\t\tProtocol: \"_\",\n\t\t\t\t\t\t\t\tURI: \"_\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = presence.UpsertDatabaseServer(ctx, db)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"KindAppServer\",\n\t\t\tkind: types.KindAppServer,\n\t\t\tinsertResources: func() {\n\t\t\t\tfor i := 0; i < len(names); i++ {\n\t\t\t\t\tserver, err := types.NewAppServerV3(types.Metadata{\n\t\t\t\t\t\tName: fmt.Sprintf(\"name-%v\", i),\n\t\t\t\t\t}, types.AppServerSpecV3{\n\t\t\t\t\t\tHostID: \"_\",\n\t\t\t\t\t\tApp: &types.AppV3{\n\t\t\t\t\t\t\tMetadata: types.Metadata{\n\t\t\t\t\t\t\t\tName: names[i],\n\t\t\t\t\t\t\t\tLabels: labels[i],\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: types.AppSpecV3{URI: \"_\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t})\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = presence.UpsertApplicationServer(ctx, server)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"KindKubernetesCluster\",\n\t\t\tkind: types.KindKubernetesCluster,\n\t\t\tinsertResources: func() {\n\t\t\t\tfor i := 0; i < len(names); i++ {\n\n\t\t\t\t\tkube, err := types.NewKubernetesClusterV3(\n\t\t\t\t\t\ttypes.Metadata{\n\t\t\t\t\t\t\tName: names[i],\n\t\t\t\t\t\t\tLabels: labels[i],\n\t\t\t\t\t\t},\n\t\t\t\t\t\ttypes.KubernetesClusterSpecV3{},\n\t\t\t\t\t)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tkubeServer, err := types.NewKubernetesServerV3FromCluster(\n\t\t\t\t\t\tkube,\n\t\t\t\t\t\tfmt.Sprintf(\"host-%v\", i),\n\t\t\t\t\t\tfmt.Sprintf(\"hostID-%v\", i),\n\t\t\t\t\t)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t// Upsert server.\n\t\t\t\t\t_, err = presence.UpsertKubernetesServer(ctx, kubeServer)\n\t\t\t\t\trequire.NoError(t, 
err)\n\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\ttc.insertResources()\n\n\t\t\t// Look among the duplicated resource by label\n\t\t\tresp, err := presence.ListResources(ctx, proto.ListResourcesRequest{\n\t\t\t\tResourceType: tc.kind,\n\t\t\t\tNeedTotalCount: true,\n\t\t\t\tLimit: 5,\n\t\t\t\tSearchKeywords: []string{\"dev\"},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, resp.Resources, 1)\n\t\t\trequire.Equal(t, 1, resp.TotalCount)\n\t\t\trequire.Equal(t, map[string]string{\"env\": \"dev\"}, resp.Resources[0].GetAllLabels())\n\t\t})\n\t}\n}", "func (sc *syncContext) generateSyncTasks() ([]syncTask, bool) {\n\tsyncTasks := make([]syncTask, 0)\n\tsuccessful := true\n\tfor _, resourceState := range sc.compareResult.managedResources {\n\t\tif resourceState.Hook {\n\t\t\tcontinue\n\t\t}\n\t\tif sc.syncResources == nil ||\n\t\t\t(resourceState.Live != nil && argo.ContainsSyncResource(resourceState.Live.GetName(), resourceState.Live.GroupVersionKind(), sc.syncResources)) ||\n\t\t\t(resourceState.Target != nil && argo.ContainsSyncResource(resourceState.Target.GetName(), resourceState.Target.GroupVersionKind(), sc.syncResources)) {\n\n\t\t\tskipDryRun := false\n\t\t\tvar targetObj *unstructured.Unstructured\n\t\t\tif resourceState.Target != nil {\n\t\t\t\ttargetObj = resourceState.Target.DeepCopy()\n\t\t\t\tif targetObj.GetNamespace() == \"\" {\n\t\t\t\t\t// If target object's namespace is empty, we set namespace in the object. We do\n\t\t\t\t\t// this even though it might be a cluster-scoped resource. 
This prevents any\n\t\t\t\t\t// possibility of the resource from unintentionally becoming created in the\n\t\t\t\t\t// namespace during the `kubectl apply`\n\t\t\t\t\ttargetObj.SetNamespace(sc.namespace)\n\t\t\t\t}\n\t\t\t\tgvk := targetObj.GroupVersionKind()\n\t\t\t\tserverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, gvk)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Special case for custom resources: if CRD is not yet known by the K8s API server,\n\t\t\t\t\t// skip verification during `kubectl apply --dry-run` since we expect the CRD\n\t\t\t\t\t// to be created during app synchronization.\n\t\t\t\t\tif apierr.IsNotFound(err) && hasCRDOfGroupKind(sc.compareResult.managedResources, gvk.Group, gvk.Kind) {\n\t\t\t\t\t\tskipDryRun = true\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsc.setResourceDetails(&appv1.ResourceResult{\n\t\t\t\t\t\t\tName: targetObj.GetName(),\n\t\t\t\t\t\t\tGroup: gvk.Group,\n\t\t\t\t\t\t\tVersion: gvk.Version,\n\t\t\t\t\t\t\tKind: targetObj.GetKind(),\n\t\t\t\t\t\t\tNamespace: targetObj.GetNamespace(),\n\t\t\t\t\t\t\tMessage: err.Error(),\n\t\t\t\t\t\t\tStatus: appv1.ResultCodeSyncFailed,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tsuccessful = false\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, serverRes.Namespaced) {\n\t\t\t\t\t\tsc.setResourceDetails(&appv1.ResourceResult{\n\t\t\t\t\t\t\tName: targetObj.GetName(),\n\t\t\t\t\t\t\tGroup: gvk.Group,\n\t\t\t\t\t\t\tVersion: gvk.Version,\n\t\t\t\t\t\t\tKind: targetObj.GetKind(),\n\t\t\t\t\t\t\tNamespace: targetObj.GetNamespace(),\n\t\t\t\t\t\t\tMessage: fmt.Sprintf(\"Resource %s:%s is not permitted in project %s.\", gvk.Group, gvk.Kind, sc.proj.Name),\n\t\t\t\t\t\t\tStatus: appv1.ResultCodeSyncFailed,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tsuccessful = false\n\t\t\t\t\t}\n\n\t\t\t\t\tif serverRes.Namespaced && !sc.proj.IsDestinationPermitted(appv1.ApplicationDestination{Namespace: targetObj.GetNamespace(), Server: sc.server}) 
{\n\t\t\t\t\t\tsc.setResourceDetails(&appv1.ResourceResult{\n\t\t\t\t\t\t\tName: targetObj.GetName(),\n\t\t\t\t\t\t\tGroup: gvk.Group,\n\t\t\t\t\t\t\tVersion: gvk.Version,\n\t\t\t\t\t\t\tKind: targetObj.GetKind(),\n\t\t\t\t\t\t\tNamespace: targetObj.GetNamespace(),\n\t\t\t\t\t\t\tMessage: fmt.Sprintf(\"namespace %v is not permitted in project '%s'\", targetObj.GetNamespace(), sc.proj.Name),\n\t\t\t\t\t\t\tStatus: appv1.ResultCodeSyncFailed,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tsuccessful = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tsyncTask := syncTask{\n\t\t\t\tliveObj: resourceState.Live,\n\t\t\t\ttargetObj: targetObj,\n\t\t\t\tskipDryRun: skipDryRun,\n\t\t\t}\n\t\t\tsyncTasks = append(syncTasks, syncTask)\n\t\t}\n\t}\n\n\tsort.Sort(newKindSorter(syncTasks, resourceOrder))\n\treturn syncTasks, successful\n}", "func (*CanaryOnNamespaceGenerator) IsK8sResourceDuplicated() bool {\n\treturn true\n}", "func FixupDepends(subscriptionID, resourceGroup string, template map[string]interface{}, ignoreMap map[string]struct{}) {\n\tmyResources := map[string]struct{}{}\n\tfor _, resource := range jsonpath.MustCompile(\"$.resources.*\").Get(template) {\n\t\ttyp := jsonpath.MustCompile(\"$.type\").MustGetString(resource)\n\t\tname := jsonpath.MustCompile(\"$.name\").MustGetString(resource)\n\t\tmyResources[resourceid.ResourceID(subscriptionID, resourceGroup, typ, name)] = struct{}{}\n\t}\n\n\tvar recurse func(myResourceID string, i interface{}, dependsMap map[string]struct{})\n\n\t// walk the data structure collecting \"id\" fields whose values look like\n\t// Azure resource IDs. Trim sub-resources from IDs. 
Ignore IDs that are\n\t// self-referent\n\trecurse = func(myResourceID string, i interface{}, dependsMap map[string]struct{}) {\n\t\tswitch i := i.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tif id, ok := i[\"id\"]; ok {\n\t\t\t\tif id, ok := id.(string); ok {\n\t\t\t\t\tparts := strings.Split(id, \"/\")\n\t\t\t\t\tif len(parts) > 9 {\n\t\t\t\t\t\tparts = parts[:9]\n\t\t\t\t\t}\n\t\t\t\t\tif len(parts) == 9 {\n\t\t\t\t\t\tid = strings.Join(parts, \"/\")\n\t\t\t\t\t\t_, ignoreIt := ignoreMap[id]\n\t\t\t\t\t\tif id != myResourceID && !ignoreIt {\n\t\t\t\t\t\t\tdependsMap[id] = struct{}{}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, v := range i {\n\t\t\t\trecurse(myResourceID, v, dependsMap)\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tfor _, v := range i {\n\t\t\t\trecurse(myResourceID, v, dependsMap)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, resource := range jsonpath.MustCompile(\"$.resources.*\").Get(template) {\n\t\ttyp := jsonpath.MustCompile(\"$.type\").MustGetString(resource)\n\t\tname := jsonpath.MustCompile(\"$.name\").MustGetString(resource)\n\n\t\tdependsMap := map[string]struct{}{}\n\n\t\t// if we're a child resource, depend on our parent\n\t\tif strings.Count(typ, \"/\") == 2 {\n\t\t\tid := resourceid.ResourceID(subscriptionID, resourceGroup, typ[:strings.LastIndexByte(typ, '/')], name[:strings.LastIndexByte(name, '/')])\n\t\t\tdependsMap[id] = struct{}{}\n\t\t}\n\n\t\trecurse(resourceid.ResourceID(subscriptionID, resourceGroup, typ, name), resource, dependsMap)\n\n\t\tdepends := make([]string, 0, len(dependsMap))\n\t\tfor k := range dependsMap {\n\t\t\tif _, found := myResources[k]; found {\n\t\t\t\tdepends = append(depends, k)\n\t\t\t}\n\t\t}\n\n\t\tif len(depends) > 0 {\n\t\t\tsort.Strings(depends)\n\n\t\t\tjsonpath.MustCompile(\"$.dependsOn\").Set(resource, depends)\n\t\t}\n\t}\n}", "func (c *Canary) CleanPreviousCanaryResources(region schemas.RegionConfig, completeCanary bool) error {\n\tclient, err := selectClientFromList(c.AWSClients, 
region.Region)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprefix := tool.BuildPrefixName(c.AwsConfig.Name, c.Stack.Env, region.Region)\n\n\tasgList, err := client.EC2Service.GetAllMatchingAutoscalingGroupsWithPrefix(prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, asg := range asgList {\n\t\tif (completeCanary && *asg.AutoScalingGroupName == c.LatestAsg[region.Region]) || !tool.IsStringInArray(*asg.AutoScalingGroupName, c.PrevAsgs[region.Region]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tc.Logger.Debugf(\"[Resizing] target autoscaling group : %s\", *asg.AutoScalingGroupName)\n\t\tif err := c.ResizingAutoScalingGroupCount(client, *asg.AutoScalingGroupName, 0); err != nil {\n\t\t\tc.Logger.Errorf(err.Error())\n\t\t}\n\t\tc.Logger.Debugf(\"Resizing autoscaling group finished: %s\", *asg.AutoScalingGroupName)\n\n\t\tfor _, tg := range asg.TargetGroupARNs {\n\t\t\tif tool.IsCanaryTargetGroupArn(*tg, region.Region) {\n\t\t\t\tc.Logger.Debugf(\"Try to delete target group: %s\", *tg)\n\t\t\t\tif err := client.ELBV2Service.DeleteTargetGroup(tg); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tc.Logger.Debugf(\"Deleted target group: %s\", *tg)\n\t\t\t}\n\t\t}\n\t}\n\n\tc.Logger.Debugf(\"Start to delete load balancer and security group for canary\")\n\tif completeCanary {\n\t\tif err := c.DeleteLoadBalancer(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.LoadBalancerDeletionChecking(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.DeleteEC2IngressRules(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.DeleteEC2SecurityGroup(region); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := c.DeleteLBSecurityGroup(region); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Client) Push(resources []APIResource) error {\n\tfor _, res := range resources {\n\t\t// REVISIT: maybe we should save updates (and thus zk and\n\t\t// midolman loads) by performing GET and compare first.\n\t\t// Or we can make 
the MidoNet API detect and ignore no-op updates.\n\t\tresp, body, err := c.post(res)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif resp.StatusCode == 404 || resp.StatusCode == 400 {\n\t\t\t// There are a few cases we can see 404 here.\n\t\t\t// - The resource is HasParent and the parent has not been\n\t\t\t// created yet\n\t\t\t// - The resource has a reference to the other resources (e.g.\n\t\t\t// filter chains for a Bridge) and they have not been created\n\t\t\t// yet\n\t\t\t// Also, MidoNet API returns 400 in a similar cases.\n\t\t\t// - When the port referenced by Route.nextHopPort doesn't exist.\n\t\t\t// (ROUTE_NEXT_HOP_PORT_NOT_NULL)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"resource\": res,\n\t\t\t}).Info(\"Referent doesn't exist yet?\")\n\t\t\treturn fmt.Errorf(\"Referent doesn't exist yet?\")\n\t\t}\n\t\tif resp.StatusCode == 409 || (resp.StatusCode == 500 && mna1315(res)) {\n\t\t\tif res.Path(\"PUT\") != \"\" {\n\t\t\t\tresp, body, err = c.put(res)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif resp.StatusCode == 409 {\n\t\t\t\t\tif _, ok := res.(*TunnelZone); ok {\n\t\t\t\t\t\t// Workaound for UNIQUE_TUNNEL_ZONE_NAME_TYPE issue.\n\t\t\t\t\t\t// https://midonet.atlassian.net/browse/MNA-1293\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif res.Path(\"GET\") != \"\" {\n\t\t\t\t\texists, err := c.exists(res)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif !exists {\n\t\t\t\t\t\t// assume a transient error\n\t\t\t\t\t\treturn fmt.Errorf(\"Unexpected 409\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// assume 409 meant ok\n\t\t\t\t// REVISIT: confirm that the existing resource is\n\t\t\t\t// same enough as what we want.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif resp.StatusCode/100 != 2 {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"statusCode\": resp.StatusCode,\n\t\t\t\t\"body\": body,\n\t\t\t}).Fatal(\"Unexpected status code\")\n\t\t}\n\t}\n\treturn nil\n}", "func (r 
*ReconcileGitTrack) deleteResources(leftovers map[string]farosv1alpha1.GitTrackObjectInterface) error {\n\tif len(leftovers) > 0 {\n\t\tr.log.V(0).Info(\"Found leftover resources to clean up\", \"leftover resources\", string(len(leftovers)))\n\t}\n\tfor name, obj := range leftovers {\n\t\tif err := r.Delete(context.TODO(), obj); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to delete child for '%s': '%s'\", name, err)\n\t\t}\n\t\tr.log.V(0).Info(\"Child deleted\", \"child name\", name)\n\t}\n\treturn nil\n}", "func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {\n\tinfo, ok := util.ManifestInfoFromContext(req.Context())\n\tif !ok {\n\t\treturn nil, errors.New(\"manifest info missing\")\n\t}\n\n\tblobs, err := dao.GetExclusiveBlobs(info.ProjectID, info.Repository, info.Digest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo.ExclusiveBlobs = blobs\n\n\tvar size int64\n\tfor _, blob := range blobs {\n\t\tif !blob.IsForeignLayer() {\n\t\t\tsize = size + blob.Size\n\t\t}\n\t}\n\n\treturn types.ResourceList{types.ResourceStorage: size}, nil\n}", "func dedupeList(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) {\n\tfmt.Printf(\"%s: %d duplicates\\n\", remote, len(objs))\n\tfor i, o := range objs {\n\t\thashValue := \"\"\n\t\tif ht != hash.None {\n\t\t\tvar err error\n\t\t\thashValue, err = o.Hash(ctx, ht)\n\t\t\tif err != nil {\n\t\t\t\thashValue = err.Error()\n\t\t\t}\n\t\t}\n\t\tif byHash {\n\t\t\tfmt.Printf(\" %d: %12d bytes, %s, %s\\n\", i+1, o.Size(), o.ModTime(ctx).Local().Format(\"2006-01-02 15:04:05.000000000\"), o.Remote())\n\t\t} else {\n\t\t\tfmt.Printf(\" %d: %12d bytes, %s, %v %32s\\n\", i+1, o.Size(), o.ModTime(ctx).Local().Format(\"2006-01-02 15:04:05.000000000\"), ht, hashValue)\n\t\t}\n\t}\n}", "func (b *Botanist) DeleteStaleExtensionResources(ctx context.Context) error {\n\twantedExtensionTypes := sets.NewString()\n\tfor _, extension := range b.Shoot.Extensions 
{\n\t\twantedExtensionTypes.Insert(extension.Spec.Type)\n\t}\n\treturn b.deleteExtensionResources(ctx, wantedExtensionTypes)\n}", "func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {\n\tresourceTasks := syncTasks{}\n\tsuccessful = true\n\n\tfor _, resource := range sc.compareResult.managedResources {\n\t\tif !sc.containsResource(resource) {\n\t\t\tlog.WithFields(log.Fields{\"group\": resource.Group, \"kind\": resource.Kind, \"name\": resource.Name}).\n\t\t\t\tDebug(\"skipping\")\n\t\t\tcontinue\n\t\t}\n\n\t\tobj := obj(resource.Target, resource.Live)\n\n\t\t// this creates garbage tasks\n\t\tif hook.IsHook(obj) {\n\t\t\tlog.WithFields(log.Fields{\"group\": obj.GroupVersionKind().Group, \"kind\": obj.GetKind(), \"namespace\": obj.GetNamespace(), \"name\": obj.GetName()}).\n\t\t\t\tDebug(\"skipping hook\")\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, phase := range syncPhases(obj) {\n\t\t\tresourceTasks = append(resourceTasks, &syncTask{phase: phase, targetObj: resource.Target, liveObj: resource.Live})\n\t\t}\n\t}\n\n\tsc.log.WithFields(log.Fields{\"resourceTasks\": resourceTasks}).Debug(\"tasks from managed resources\")\n\n\thookTasks := syncTasks{}\n\tif !sc.skipHooks() {\n\t\tfor _, obj := range sc.compareResult.hooks {\n\t\t\tfor _, phase := range syncPhases(obj) {\n\t\t\t\t// Hook resources names are deterministic, whether they are defined by the user (metadata.name),\n\t\t\t\t// or formulated at the time of the operation (metadata.generateName). 
If user specifies\n\t\t\t\t// metadata.generateName, then we will generate a formulated metadata.name before submission.\n\t\t\t\ttargetObj := obj.DeepCopy()\n\t\t\t\tif targetObj.GetName() == \"\" {\n\t\t\t\t\tpostfix := strings.ToLower(fmt.Sprintf(\"%s-%s-%d\", sc.syncRes.Revision[0:7], phase, sc.opState.StartedAt.UTC().Unix()))\n\t\t\t\t\tgenerateName := obj.GetGenerateName()\n\t\t\t\t\ttargetObj.SetName(fmt.Sprintf(\"%s%s\", generateName, postfix))\n\t\t\t\t}\n\n\t\t\t\thookTasks = append(hookTasks, &syncTask{phase: phase, targetObj: targetObj})\n\t\t\t}\n\t\t}\n\t}\n\n\tsc.log.WithFields(log.Fields{\"hookTasks\": hookTasks}).Debug(\"tasks from hooks\")\n\n\ttasks := resourceTasks\n\ttasks = append(tasks, hookTasks...)\n\n\t// enrich target objects with the namespace\n\tfor _, task := range tasks {\n\t\tif task.targetObj == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif task.targetObj.GetNamespace() == \"\" {\n\t\t\t// If target object's namespace is empty, we set namespace in the object. We do\n\t\t\t// this even though it might be a cluster-scoped resource. 
This prevents any\n\t\t\t// possibility of the resource from unintentionally becoming created in the\n\t\t\t// namespace during the `kubectl apply`\n\t\t\ttask.targetObj = task.targetObj.DeepCopy()\n\t\t\ttask.targetObj.SetNamespace(sc.namespace)\n\t\t}\n\t}\n\n\t// enrich task with live obj\n\tfor _, task := range tasks {\n\t\tif task.targetObj == nil || task.liveObj != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttask.liveObj = sc.liveObj(task.targetObj)\n\t}\n\n\t// enrich tasks with the result\n\tfor _, task := range tasks {\n\t\t_, result := sc.syncRes.Resources.Find(task.group(), task.kind(), task.namespace(), task.name(), task.phase)\n\t\tif result != nil {\n\t\t\ttask.syncStatus = result.Status\n\t\t\ttask.operationState = result.HookPhase\n\t\t\ttask.message = result.Message\n\t\t}\n\t}\n\n\t// check permissions\n\tfor _, task := range tasks {\n\t\tserverRes, err := kube.ServerResourceForGroupVersionKind(sc.disco, task.groupVersionKind())\n\t\tif err != nil {\n\t\t\t// Special case for custom resources: if CRD is not yet known by the K8s API server,\n\t\t\t// skip verification during `kubectl apply --dry-run` since we expect the CRD\n\t\t\t// to be created during app synchronization.\n\t\t\tif apierr.IsNotFound(err) && sc.hasCRDOfGroupKind(task.group(), task.kind()) {\n\t\t\t\tsc.log.WithFields(log.Fields{\"task\": task}).Debug(\"skip dry-run for custom resource\")\n\t\t\t\ttask.skipDryRun = true\n\t\t\t} else {\n\t\t\t\tsc.setResourceResult(task, v1alpha1.ResultCodeSyncFailed, \"\", err.Error())\n\t\t\t\tsuccessful = false\n\t\t\t}\n\t\t} else {\n\t\t\tif !sc.proj.IsResourcePermitted(metav1.GroupKind{Group: task.group(), Kind: task.kind()}, serverRes.Namespaced) {\n\t\t\t\tsc.setResourceResult(task, v1alpha1.ResultCodeSyncFailed, \"\", fmt.Sprintf(\"Resource %s:%s is not permitted in project %s.\", task.group(), task.kind(), sc.proj.Name))\n\t\t\t\tsuccessful = false\n\t\t\t}\n\t\t\tif serverRes.Namespaced && 
!sc.proj.IsDestinationPermitted(v1alpha1.ApplicationDestination{Namespace: task.namespace(), Server: sc.server}) {\n\t\t\t\tsc.setResourceResult(task, v1alpha1.ResultCodeSyncFailed, \"\", fmt.Sprintf(\"namespace %v is not permitted in project '%s'\", task.namespace(), sc.proj.Name))\n\t\t\t\tsuccessful = false\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Sort(tasks)\n\n\treturn tasks, successful\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewCreateUserRepository creates new createUserRepository with its dependencies
func NewCreateUserRepository(handler *database.MongoHandler) entity.UserRepositoryCreator { return createUserRepository{ handler: handler, collection: "users", } }
[ "func DiCreateUserRepository(db *gorm.DB) repository.User {\n\treturn psql.CreateUserRepository(db)\n}", "func newUserRepo(db *sql.DB) *userRepo {\n\treturn &userRepo{\n\t\tdb: db,\n\t}\n}", "func CreateUserRepositoryForTests() stored_users.UserRepository {\n\tfileRepository := concrete_files.CreateFileRepositoryForTests()\n\tuserBuilderFactory := CreateUserBuilderFactoryForTests()\n\tout := CreateUserRepository(fileRepository, userBuilderFactory)\n\treturn out\n}", "func UserCreate(ctx context.Context, user *model.User, userRepo model.UserRepository) (string, error) {\n\treqID := utils.GetReqIDFromContext(ctx)\n\tlogger := log.WithFields(log.Fields{\n\t\tconstant.ReqID: reqID,\n\t})\n\tlogger.Debug(\"Enter in UserCreate service\")\n\n\treturn userRepo.Store(ctx, user)\n}", "func NewUserRepository(dbToUse *gorm.DB) UserRepository {\n\t// db, err := gorm.Open(\"sqlite3\", \"test.db\")\n\t// if err != nil {\n\t// panic(\"failed to connect database\")\n\t// }\n\t//defer db.Close()\n\trepository := UserRepository{DB: dbToUse}\n\treturn repository\n}", "func CreateUserRepository() (*UserRepository, error) {\n\tif databaseConnection == nil {\n\t\treturn nil, ErrGormNotInitialized\n\t}\n\treturn &UserRepository{}, nil\n}", "func CreateUser(w http.ResponseWriter, r *http.Request) {\n\trequestBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"Error\")\n\t}\n\n\tvar user models.User\n\tif err = json.Unmarshal(requestBody, &user); err != nil {\n\t\tlog.Fatal(\"Error\")\n\t}\n\n\tdb, err := database.OpenDbConnection()\n\tif err != nil {\n\t\tlog.Fatal(\"error\")\n\t}\n\n\trepository := repositories.UserRepository(db)\n\trepository.Create(user)\n}", "func UserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, 
charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlUser := urlVars[\"user\"]\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefUserUUID := gorillaContext.Get(r, \"auth_user_uuid\").(string)\n\n\t// Read POST JSON body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terr := APIErrorInvalidRequestBody()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Parse pull options\n\tpostBody, err := auth.GetUserFromJSON(body)\n\tif err != nil {\n\t\terr := APIErrorInvalidArgument(\"User\")\n\t\trespondErr(w, err)\n\t\tlog.Error(string(body[:]))\n\t\treturn\n\t}\n\n\tuuid := uuid.NewV4().String() // generate a new uuid to attach to the new project\n\ttoken, err := auth.GenToken() // generate a new user token\n\tcreated := time.Now().UTC()\n\t// Get Result Object\n\tres, err := auth.CreateUser(uuid, urlUser, postBody.FirstName, postBody.LastName, postBody.Organization, postBody.Description,\n\t\tpostBody.Projects, token, postBody.Email, postBody.ServiceRoles, created, refUserUUID, refStr)\n\n\tif err != nil {\n\t\tif err.Error() == \"exists\" {\n\t\t\terr := APIErrorConflict(\"User\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"duplicate\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"invalid\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}", "func testCreateUser(s services.UserService, t *testing.T) {\n\tt.Run(\"createUser\", func(t *testing.T) {\n\t\t// Declares the input parameters for 
creating a user\n\t params := services.UserInputDTO{Name: \"Giggi\"}\n\t\n\t\t// [Step 1 - EXECUTE]\n\t\t// Execute the method to create a new user\n\t\tuser, err := s.Create(params)\n\t\t\n\t\t// [Step 2 - CHECK]\n\t\t// Check that the previous operation was successful\n\t\tif err != nil {\n\t\t t.Error(err)\n\t\t\treturn\n\t\t}\n\t\t\n\t\t// [Step 3 - CHECK]\n\t\t// Check that the user ID has been assigned\n\t\tif user.Id == 0 {\n\t\t\tt.Error(\"User Id not assigned\")\n\t\t\treturn\n\t\t}\n\t\t\n\t\t// [Step 4 - CHECK]\n\t\t// Check if the user name is as expected\n\t\tif user.Name != params.Name {\n\t\t\tt.Errorf(\"Expected %s as Name value. Found: %s\", params.Name, user.Name)\n\t\t\treturn\n\t\t}\n\t\t\n\t\t// [Step 5 - EXECUTE]\n\t\t// Requests the repository to provide the user it has just\n\t\t// created, to check that he or she is actually present in it\n\t\tuser, err = s.GetById(user.Id)\n\t\t\n\t\t// [Step 6 - CHECK]\n\t\t// Check that the previous operation was successful\n\t\t// (note: the NOT FOUND error is an expected error)\n\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t t.Error(err)\n\t\t\treturn\n\t\t}\n\t\t\n\t\t// [Step 6 - CHECK]\n\t\t// Check that the user has been returned successfully\n\t\tif user == nil {\n\t\t t.Error(\"User not stored\")\n\t\t\treturn\n\t\t}\n\t})\n}", "func (m *MockIUserRepository) CreateNewUser() *model.User {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateNewUser\")\n\tret0, _ := ret[0].(*model.User)\n\treturn ret0\n}", "func CreateUserRepoImpl(db *gorm.DB) user.UserRepository {\n\treturn &UserRepoImpl{db}\n}", "func NewTestRepository() user.IRepository {\n\tlog.Println(\"Create users table...\")\n\n\trep := &testRepo{\n\t\tusers: make([]*user.User, 0),\n\t}\n\n\treturn rep\n}", "func (m MockRepositoryStore) CreateRepository(ctx context.Context, repositoryID int64, virtualStorage, relativePath, replicaPath, primary string, updatedSecondaries, outdatedSecondaries []string, 
storePrimary, storeAssignments bool) error {\n\tif m.CreateRepositoryFunc == nil {\n\t\treturn nil\n\t}\n\n\treturn m.CreateRepositoryFunc(ctx, repositoryID, virtualStorage, relativePath, replicaPath, primary, updatedSecondaries, outdatedSecondaries, storePrimary, storeAssignments)\n}", "func (factory Factory) Create(projectName string) (applications.GitRepo, error) {\n\n\t//select repo\n\trepoType, err := factory.prompt.forGitRepository()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif repoType == constants.ADOS {\n\t\treturn ados.NewRepository(ados.NewPrompts()), nil\n\t}\n\tif repoType == constants.Github {\n\t\treturn github.NewGithubRepo(github.NewPrompts()), nil\n\t}\n\n\t//construct\n\treturn gitlab.NewGitlabRepo(gitlab.NewPrompts()), nil\n}", "func (server Server) CreateNewUser(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User // make a user\n\tvar res models.APIResponse // make a response\n\n\terr := json.NewDecoder(r.Body).Decode(&user) //decode the user\n\tif err != nil {\n\t\tlog.Printf(\"Unable to decode the request body. 
%v\", err)\n\t\tres = models.BuildAPIResponseFail(\"Unable to decode the request body\", nil)\n\t}\n\tif user.Name == \"\" || user.Email == \"\" || user.Password == \"\" {\n\t\tres = models.BuildAPIResponseFail(\"Blank users cannot be created\", nil)\n\t} else {\n\t\tinsertID := insertUser(user, server.db) // call insert user function and pass the note\n\t\tres = models.BuildAPIResponseSuccess(fmt.Sprintf(\"User Created with %d id\", insertID), nil) // format a response object\n\t}\n\tjson.NewEncoder(w).Encode(res)\n\n}", "func ProjectUserCreate(w http.ResponseWriter, r *http.Request) {\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab url path variables\n\turlVars := mux.Vars(r)\n\turlUser := urlVars[\"user\"]\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefUserUUID := gorillaContext.Get(r, \"auth_user_uuid\").(string)\n\trefProjUUID := gorillaContext.Get(r, \"auth_project_uuid\").(string)\n\n\t// Read POST JSON body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terr := APIErrorInvalidRequestBody()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Parse pull options\n\tpostBody, err := auth.GetUserFromJSON(body)\n\tif err != nil {\n\t\terr := APIErrorInvalidArgument(\"User\")\n\t\trespondErr(w, err)\n\t\tlog.Error(string(body[:]))\n\t\treturn\n\t}\n\n\t// omit service wide roles\n\tpostBody.ServiceRoles = []string{}\n\n\t// allow the user to be created to only have reference to the project under which is being created\n\tprName := projects.GetNameByUUID(refProjUUID, refStr)\n\tif prName == \"\" {\n\t\terr := APIErrGenericInternal(\"Internal Error\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\tprojectRoles := auth.ProjectRoles{}\n\n\tfor _, p := range postBody.Projects {\n\t\tif p.Project == 
prName {\n\t\t\tprojectRoles.Project = prName\n\t\t\tprojectRoles.Roles = p.Roles\n\t\t\tprojectRoles.Topics = p.Topics\n\t\t\tprojectRoles.Subs = p.Subs\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// if the project was not mentioned in the creation, add it\n\tif projectRoles.Project == \"\" {\n\t\tprojectRoles.Project = prName\n\t}\n\n\tpostBody.Projects = []auth.ProjectRoles{projectRoles}\n\n\tuuid := uuid.NewV4().String() // generate a new uuid to attach to the new project\n\ttoken, err := auth.GenToken() // generate a new user token\n\tcreated := time.Now().UTC()\n\n\t// Get Result Object\n\tres, err := auth.CreateUser(uuid, urlUser, \"\", \"\", \"\", \"\", postBody.Projects, token, postBody.Email, postBody.ServiceRoles, created, refUserUUID, refStr)\n\n\tif err != nil {\n\t\tif err.Error() == \"exists\" {\n\t\t\terr := APIErrorConflict(\"User\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"invalid\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tif strings.HasPrefix(err.Error(), \"duplicate\") {\n\t\t\terr := APIErrorInvalidData(err.Error())\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := res.ExportJSON()\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = []byte(resJSON)\n\trespondOK(w, output)\n\n}", "func New() UsersRepository {\n\treturn &usersRepository{}\n}", "func New(db *sql.DB) repositories.UserRepository {\n\treturn &UserRepository{db: db}\n}", "func (c *Repo) CreateRepo(w http.ResponseWriter, req *http.Request) (int, interface{}) {\n\tsctx := req.Context().Value(middlewares.SessionKey)\n\tsession, ok := sctx.(*types.Session)\n\tif !ok {\n\t\tfmt.Println(\"cant convert to session\", sctx)\n\t\treturn httputils.InternalError()\n\t}\n\n\trepo := 
types.Repo{}\n\n\terr := json.NewDecoder(req.Body).Decode(&repo)\n\tif err != nil {\n\t\treturn httputils.InternalError()\n\t}\n\n\trepo.Uname = session.User.Uname\n\trepo.OwnerID = session.User.ID\n\n\terr = repos.CreateBareRepo(&repo)\n\tif err != nil {\n\t\treturn http.StatusConflict, httputils.ErrorResponse([]string{\"Repo already exists\"})\n\t}\n\n\treturn http.StatusOK, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
syncBucket syncs the local gallery with the bucket
func syncBucket(name string) { objects, err := s3.ListBucket(name, "") if err != nil { log.Printf("Can't list objects in bucket: %v", err) } // download each file that we don't have yet locally: for _, object := range objects { localfile := filepath.Join(galleryPath, object) if _, err := os.Stat(localfile); err != nil { err = s3.DownloadFromBucket("gallery", object, localfile) if err != nil { log.Printf("Can't download file %v: %v", object, err) } } } log.Printf("Done downloading missing files from bucket %v", name) // upload metadata files that are not yet remote: files, err := ioutil.ReadDir(galleryPath) if err != nil { log.Printf("Can't scan local gallery for metadata: %v", err) return } for _, f := range files { if strings.HasSuffix(f.Name(), "meta") { if !has(objects, f.Name()) { err = s3.UploadToBucket("gallery", f.Name(), filepath.Join(galleryPath, f.Name()), "text/plain") if err != nil { log.Printf("Can't upload metadata file %v: %v", f.Name(), err) } } } } }
[ "func syncBucket(name string) {\n\terr := s3.CreateBucket(name)\n\tif err != nil {\n\t\tlog.Printf(\"Can't create bucket: %v\", err)\n\t}\n\tfor {\n\t\tobjects, err := s3.ListBucket(name, \"\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Can't list objects in bucket: %v\", err)\n\t\t}\n\t\t// download each file that we don't have yet locally:\n\t\tfor _, object := range objects {\n\t\t\tlocalfile := filepath.Join(galleryPath, object)\n\t\t\tif _, err := os.Stat(localfile); err != nil {\n\t\t\t\terr = s3.DownloadFromBucket(\"gallery\", object, localfile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Can't download file %v: %v\", object, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Done downloading missing files from bucket %v\", name)\n\n\t\t// upload image files that are not yet remote:\n\t\tfiles, err := ioutil.ReadDir(galleryPath)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Can't scan local gallery for image files: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tswitch {\n\t\t\tcase strings.HasSuffix(f.Name(), \"jpg\"), strings.HasSuffix(f.Name(), \"jpeg\"):\n\t\t\t\tif !has(objects, f.Name()) {\n\t\t\t\t\terr = s3.UploadToBucket(\"gallery\", f.Name(), filepath.Join(galleryPath, f.Name()), \"image/jpeg\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Can't upload metadata file %v: %v\", f.Name(), err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase strings.HasSuffix(f.Name(), \"png\"):\n\t\t\t\tif !has(objects, f.Name()) {\n\t\t\t\t\terr = s3.UploadToBucket(\"gallery\", f.Name(), filepath.Join(galleryPath, f.Name()), \"image/png\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Can't upload metadata file %v: %v\", f.Name(), err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(10 * time.Second)\n\t}\n}", "func Sync(t time.Time, log *zap.Logger) {\n\tlog.Info(\"S3 sync begun\")\n\n\tbucket := Config.bucket\n\tregion := Config.bucketRegion\n\n\tsvc := setUpAwsSession(region)\n\tresp, err := listBucket(bucket, 
region, svc, log)\n\tif err != nil {\n\t\tlog.Error(\"failed to list bucket\",\n\t\t\tzap.Error(err),\n\t\t\tzap.String(\"bucket\", bucket),\n\t\t\tzap.String(\"region\", region),\n\t\t)\n\n\t\treturn\n\t}\n\n\tif err := parseAllFiles(resp, bucket, svc, log); err != nil {\n\t\treturn\n\t}\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tUp = true\n\tHealth = true\n\n\tlog.Info(\"S3 sync finished\")\n}", "func (b *Bucket) Bucket(name string) *Bucket { panic(\"n/i\") }", "func (h *S3Handler) uploadToS3(p *Project, wg *sync.WaitGroup) {\n\n\tdefer recovery()\n\t// get image content\n\tdefer wg.Done()\n\tvar arr []string\n\n\tfor _, url := range(p.Image) {\n\n\t\tbuffer:= img(url)\n\t\tif len(*buffer) > 0 {\n\t\t\t// upload to s3\n\n\t\t\ts := strings.Split(url, \"/\")\n\t\t\turl_key := s[len(s)-1]\n\n\t\t\tmyurl := fmt.Sprintf(\"https://%s.s3.ap-south-1.amazonaws.com/%s\", S3_BUCKET, url_key)\n\n\t\t\t_, err := s3.New(h.Session).PutObject(&s3.PutObjectInput{\n\n\t\t\t\tBucket: aws.String(S3_BUCKET),\n\t\t\t\tKey: aws.String(url_key),\n\t\t\t\tACL: aws.String(\"public-read\"),\n\t\t\t\tBody: bytes.NewReader(*buffer),\n\t\t\t\tContentLength: aws.Int64(int64(len(*buffer))),\n\t\t\t\tContentType: aws.String(http.DetectContentType(*buffer)),\n\t\t\t\tContentDisposition: aws.String(\"attachment\"),\n\t\t\t})\n\n\t\t\tif err != nil{\n\t\t\t\tpanic(\"********************************************************************************************************************************************\")\n\n\n\t\t\t}else {\n\t\t\t\tarr = append(arr, myurl)\n\t\t\t\t//fmt.Println(akki)\n\n\t\t\t}\n\t\t}\n\n\n\t}\n\n\t// update to mongodb\n\tobjID, _ := primitive.ObjectIDFromHex(p.ID)\n\n\tfilter := bson.M{\"_id\": bson.M{\"$eq\": objID}}\n\n\n\tupdate := bson.M{\n\t\t\"$set\": bson.M{\n\t\t\t\"new_image_urlss\": arr,\n\n\t\t},\n\t}\n\tupdateResult, _ := h.Collection.UpdateOne(context.TODO(), filter, update)\n\n\tfmt.Println(updateResult)\n\n\n}", "func patchBucket(w http.ResponseWriter, r 
*http.Request) {\n\n\tpathParams := mux.Vars(r)\n\tsageBucketID := pathParams[\"bucket\"]\n\t//objectKey := pathParams[\"key\"]\n\n\tvars := mux.Vars(r)\n\tusername := vars[\"username\"]\n\n\t//rawQuery := r.URL.RawQuery\n\n\t// normal bucket metadata\n\n\tallowed, err := userHasBucketPermission(username, sageBucketID, \"FULL_CONTROL\")\n\tif err != nil {\n\t\trespondJSONError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tif !allowed {\n\t\trespondJSONError(w, http.StatusUnauthorized, \"Write access to bucket metadata denied (%s, %s)\", username, sageBucketID)\n\t\treturn\n\t}\n\n\tvar deltaBucket map[string]string\n\n\terr = json.NewDecoder(r.Body).Decode(&deltaBucket)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Could not parse json: %s\", err.Error())\n\t\trespondJSONError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tlog.Printf(\"got: %v\", deltaBucket)\n\n\tdb, err := sql.Open(\"mysql\", mysqlDSN)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Unable to connect to database: %v\", err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tnewBucketname, ok := deltaBucket[\"name\"]\n\tif ok {\n\n\t\tinsertQueryStr := \"UPDATE Buckets SET name=? WHERE id=UUID_TO_BIN(?) 
;\"\n\t\t_, err = db.Exec(insertQueryStr, newBucketname, sageBucketID)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Bucket creation in mysql failed: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t}\n\n\t// return should return real bucket\n\n\tnewBucket, err := GetSageBucket(sageBucketID)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"GetSageBucket returned: %s\", err.Error())\n\t\trespondJSONError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\trespondJSON(w, http.StatusOK, newBucket)\n\t//bucket fields:\n\t//metadata , name , type, (owner, change requires permission change)\n\n\t//permission\n\n}", "func syncRepo(repo, dest, branch, rev string, depth int) error {\n\ttarget := path.Join(volMount, dest)\n\tgitRepoPath := path.Join(target, \".git\")\n\t_, err := os.Stat(gitRepoPath)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\terr = initRepo(repo, target, branch, rev, depth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase err != nil:\n\t\treturn fmt.Errorf(\"error checking if repo exist %q: %v\", gitRepoPath, err)\n\tdefault:\n\t\tneedUpdate, err := gitRemoteChanged(target, branch)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !needUpdate {\n\t\t\tlog.Printf(\"No change\")\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn addWorktreeAndSwap(dest, branch, rev)\n}", "func UpdateBucket(context *gin.Context) {\n\t// ......\n}", "func Initrepo(server string, secure bool, accesskey string, secretkey string, enckey string, bucketname string, dir string) bool {\n\t// New returns an Amazon S3 compatible client object. 
API compatibility (v2 or v4) is automatically\n\t// determined based on the Endpoint value.\n\ts3Client, err := minio.New(server, accesskey, secretkey, secure)\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(err))\n\t\treturn false\n\t}\n\n\tfound, err := s3Client.BucketExists(bucketname)\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(err))\n\t\treturn false\n\t}\n\n\tif found {\n\t\tjc.SendString(\"Bucket exists.\")\n\t} else {\n\t\tjc.SendString(\"Creating bucket.\")\n\t\terr = s3Client.MakeBucket(bucketname, \"us-east-1\")\n\t\tif err != nil {\n\t\t\tjc.SendString(fmt.Sprintln(err))\n\t\t\treturn false\n\t\t}\n\t}\n\tvar strs []string\n\tslash := dir[len(dir)-1:]\n\tif slash != \"/\" {\n\t\tstrs = append(strs, dir)\n\t\tstrs = append(strs, \"/\")\n\t\tdir = strings.Join(strs, \"\")\n\t}\n\tvar dbname []string\n\tvar dbnameLocal []string\n\tdbname = append(dbname, bucketname)\n\tdbname = append(dbname, \".db\")\n\tdbnameLocal = append(dbnameLocal, dir)\n\tdbnameLocal = append(dbnameLocal, \".\")\n\tdbnameLocal = append(dbnameLocal, strings.Join(dbname, \"\"))\n\t// check if dir exists, create if not\n\tbasedir := filepath.Dir(strings.Join(dbnameLocal, \"\"))\n\tos.MkdirAll(basedir, os.ModePerm)\n\n\t// create empty repository\n\tfile, err := os.Create(strings.Join(dbnameLocal, \"\"))\n\tdefer file.Close()\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(err))\n\t\treturn false\n\t}\n\tdbuploadlist := make(map[string]string)\n\t// add these files to the upload list\n\tdbuploadlist[strings.Join(dbname, \"\")] = strings.Join(dbnameLocal, \"\")\n\tfailedUploads, err := Upload(server, 443, secure, accesskey, secretkey, enckey, dbuploadlist, bucketname)\n\tif err != nil {\n\t\tfor _, hash := range failedUploads {\n\t\t\tjc.SendString(fmt.Sprintln(\"Failed to upload: \", hash))\n\t\t}\n\t\treturn false\n\t}\n\n\terr = os.Remove(strings.Join(dbnameLocal, \"\"))\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(\"Error deleting database!\", err))\n\t}\n\treturn 
true\n\n}", "func (s *Server) upload(w http.ResponseWriter, r *http.Request) {\n\tbuckets, err := s.conn.Buckets()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\ts.render(w, r, \"upload.html\", pongo2.Context{\n\t\t\"icon\": \"upload\",\n\t\t\"title\": \"Upload\",\n\t\t\"subtitle\": \"Transfer files to Sprise\",\n\t\t\"buckets\": buckets,\n\t})\n}", "func (s *synchronizer) restoreBucket(ctx context.Context, bucketSlug string) error {\n\n\tlocalBucket, err := s.getBucket(ctx, bucketSlug)\n\tif err != nil {\n\t\tlog.Error(\"Error in getBucket\", err)\n\t\treturn err\n\t}\n\n\tmirrorBucket, err := s.getMirrorBucket(ctx, bucketSlug)\n\tif err != nil {\n\t\tlog.Error(\"Error in getMirrorBucket\", err)\n\t\treturn err\n\t}\n\n\titerator := func(c context.Context, b *bucket.Bucket, itemPath string) error {\n\t\texists, _ := localBucket.FileExists(c, itemPath)\n\n\t\tif exists {\n\t\t\tnewerBucket, err := s.newerBucketPath(c, localBucket, mirrorBucket, itemPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif newerBucket == localBucket {\n\t\t\t\t// do not overwrite: mirror is not newer\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tbucketModel, err := s.model.FindBucket(ctx, bucketSlug)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\titem, err := mirrorBucket.ListDirectory(ctx, itemPath)\n\t\tif s.eventNotifier != nil && err == nil {\n\t\t\tinfo := utils.MapDirEntryToFileInfo(api_buckets_pb.ListPathResponse(*item), itemPath)\n\t\t\tinfo.BackedUp = true\n\t\t\tinfo.LocallyAvailable = exists\n\t\t\tinfo.RestoreInProgress = true\n\t\t\ts.eventNotifier.SendFileEvent(events.NewFileEvent(info, events.FileRestoring, bucketSlug, bucketModel.DbID))\n\t\t}\n\n\t\ts.NotifyFileRestore(bucketSlug, itemPath)\n\t\treturn nil\n\t}\n\n\tif _, err = mirrorBucket.Each(ctx, \"\", iterator, true); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *synchronizer) restoreBucket(ctx context.Context, bucketSlug 
string) error {\n\n\tlocalBucket, err := s.getBucket(ctx, bucketSlug)\n\tif err != nil {\n\t\tlog.Error(\"Error in getBucket\", err)\n\t\treturn err\n\t}\n\n\tmirrorBucket, err := s.getMirrorBucket(ctx, bucketSlug)\n\tif err != nil {\n\t\tlog.Error(\"Error in getMirrorBucket\", err)\n\t\treturn err\n\t}\n\n\titerator := func(c context.Context, b *bucket.Bucket, itemPath string) error {\n\t\texists, err := localBucket.FileExists(c, itemPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif exists {\n\t\t\tlocalUpdatedAt, err := localBucket.UpdatedAt(c, itemPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmirrorUpdatedAt, err := mirrorBucket.UpdatedAt(c, itemPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif localUpdatedAt >= mirrorUpdatedAt {\n\t\t\t\t// do not overwrite: mirror is not newer\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif s.eventNotifier != nil {\n\t\t\ts.eventNotifier.SendFileEvent(events.NewFileEvent(itemPath, bucketSlug, events.FileRestoring, nil))\n\t\t}\n\n\t\ts.NotifyFileRestore(bucketSlug, itemPath)\n\t\treturn nil\n\t}\n\n\tif _, err = mirrorBucket.Each(ctx, \"\", iterator, true); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (bm *BucketManager) InsertBucket(settings *BucketSettings) error {\n\tposts := url.Values{}\n\tposts.Add(\"name\", settings.Name)\n\tif settings.Type == Couchbase {\n\t\tposts.Add(\"bucketType\", \"couchbase\")\n\t} else if settings.Type == Memcached {\n\t\tposts.Add(\"bucketType\", \"memcached\")\n\t} else if settings.Type == Ephemeral {\n\t\tposts.Add(\"bucketType\", \"ephemeral\")\n\t} else {\n\t\tpanic(\"Unrecognized bucket type.\")\n\t}\n\tif settings.FlushEnabled {\n\t\tposts.Add(\"flushEnabled\", \"1\")\n\t} else {\n\t\tposts.Add(\"flushEnabled\", \"0\")\n\t}\n\tposts.Add(\"replicaNumber\", fmt.Sprintf(\"%d\", settings.Replicas))\n\tposts.Add(\"authType\", \"sasl\")\n\tposts.Add(\"saslPassword\", settings.Password)\n\tposts.Add(\"ramQuotaMB\", 
fmt.Sprintf(\"%d\", settings.Quota))\n\n\tdata := []byte(posts.Encode())\n\n\treq := &gocbcore.HttpRequest{\n\t\tService: gocbcore.ServiceType(MgmtService),\n\t\tPath: \"/pools/default/buckets\",\n\t\tMethod: \"POST\",\n\t\tBody: data,\n\t\tContentType: \"application/x-www-form-urlencoded\",\n\t}\n\n\tresp, err := bm.httpClient.DoHttpRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != 202 {\n\t\tdata, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = resp.Body.Close()\n\t\tif err != nil {\n\t\t\tlogDebugf(\"Failed to close socket (%s)\", err)\n\t\t}\n\t\treturn networkError{statusCode: resp.StatusCode, message: string(data)}\n\t}\n\n\treturn nil\n}", "func (p *PBM) ResyncStorage(l *log.Event) error {\n\tstg, err := p.GetStorage(l)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to get backup store\")\n\t}\n\n\t_, err = stg.FileStat(StorInitFile)\n\tif errors.Is(err, storage.ErrNotExist) {\n\t\terr = stg.Save(StorInitFile, bytes.NewBufferString(version.DefaultInfo.Version), 0)\n\t}\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"init storage\")\n\t}\n\n\tbcps, err := stg.List(\"\", MetadataFileSuffix)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get a backups list from the storage\")\n\t}\n\n\terr = p.moveCollection(BcpCollection, BcpOldCollection)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"copy current backups meta from %s to %s\", BcpCollection, BcpOldCollection)\n\t}\n\terr = p.moveCollection(PITRChunksCollection, PITRChunksOldCollection)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"copy current pitr meta from %s to %s\", PITRChunksCollection, PITRChunksOldCollection)\n\t}\n\n\tif len(bcps) == 0 {\n\t\treturn nil\n\t}\n\n\tvar ins []interface{}\n\tfor _, b := range bcps {\n\t\td, err := stg.SourceReader(b.Name)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"read meta for %v\", b.Name)\n\t\t}\n\n\t\tv := BackupMeta{}\n\t\terr = 
json.NewDecoder(d).Decode(&v)\n\t\td.Close()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unmarshal backup meta\")\n\t\t}\n\t\terr = checkBackupFiles(&v, stg)\n\t\tif err != nil {\n\t\t\tl.Warning(\"skip snapshot %s: %v\", v.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tins = append(ins, v)\n\t}\n\t_, err = p.Conn.Database(DB).Collection(BcpCollection).InsertMany(p.ctx, ins)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"insert retrieved backups meta\")\n\t}\n\n\tpitrf, err := stg.List(PITRfsPrefix, \"\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get list of pitr chunks\")\n\t}\n\tif len(pitrf) == 0 {\n\t\treturn nil\n\t}\n\n\tvar pitr []interface{}\n\tfor _, f := range pitrf {\n\t\t_, err := stg.FileStat(PITRfsPrefix + \"/\" + f.Name)\n\t\tif err != nil {\n\t\t\tl.Warning(\"skip pitr chunk %s/%s because of %v\", PITRfsPrefix, f.Name, err)\n\t\t\tcontinue\n\t\t}\n\t\tchnk := PITRmetaFromFName(f.Name)\n\t\tif chnk != nil {\n\t\t\tpitr = append(pitr, chnk)\n\t\t}\n\t}\n\n\tif len(pitr) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err = p.Conn.Database(DB).Collection(PITRChunksCollection).InsertMany(p.ctx, pitr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"insert retrieved pitr meta\")\n\t}\n\n\treturn nil\n}", "func (c *SiteReplicationSys) syncLocalToPeers(ctx context.Context) error {\n\t// If local has buckets, enable versioning on them, create them on peers\n\t// and setup replication rules.\n\tobjAPI := newObjectLayerFn()\n\tif objAPI == nil {\n\t\treturn errSRObjectLayerNotReady\n\t}\n\tbuckets, err := objAPI.ListBuckets(ctx)\n\tif err != nil {\n\t\treturn errSRBackendIssue(err)\n\t}\n\tfor _, bucketInfo := range buckets {\n\t\tbucket := bucketInfo.Name\n\n\t\t// MinIO does not store bucket location - so we just check if\n\t\t// object locking is enabled.\n\t\tlockConfig, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(BucketObjectLockConfigNotFound); !ok {\n\t\t\t\treturn 
errSRBackendIssue(err)\n\t\t\t}\n\t\t}\n\n\t\tvar opts BucketOptions\n\t\tif lockConfig != nil {\n\t\t\topts.LockEnabled = lockConfig.ObjectLockEnabled == \"Enabled\"\n\t\t}\n\n\t\t// Now call the MakeBucketHook on existing bucket - this will\n\t\t// create buckets and replication rules on peer clusters.\n\t\terr = c.MakeBucketHook(ctx, bucket, opts)\n\t\tif err != nil {\n\t\t\treturn errSRBucketConfigError(err)\n\t\t}\n\n\t\t// Replicate bucket policy if present.\n\t\tpolicy, err := globalPolicySys.Get(bucket)\n\t\tfound := true\n\t\tif _, ok := err.(BucketPolicyNotFound); ok {\n\t\t\tfound = false\n\t\t} else if err != nil {\n\t\t\treturn errSRBackendIssue(err)\n\t\t}\n\t\tif found {\n\t\t\tpolicyJSON, err := json.Marshal(policy)\n\t\t\tif err != nil {\n\t\t\t\treturn wrapSRErr(err)\n\t\t\t}\n\t\t\terr = c.BucketMetaHook(ctx, madmin.SRBucketMeta{\n\t\t\t\tType: madmin.SRBucketMetaTypePolicy,\n\t\t\t\tBucket: bucket,\n\t\t\t\tPolicy: policyJSON,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRBucketMetaError(err)\n\t\t\t}\n\t\t}\n\n\t\t// Replicate bucket tags if present.\n\t\ttags, err := globalBucketMetadataSys.GetTaggingConfig(bucket)\n\t\tfound = true\n\t\tif _, ok := err.(BucketTaggingNotFound); ok {\n\t\t\tfound = false\n\t\t} else if err != nil {\n\t\t\treturn errSRBackendIssue(err)\n\t\t}\n\t\tif found {\n\t\t\ttagCfg, err := xml.Marshal(tags)\n\t\t\tif err != nil {\n\t\t\t\treturn wrapSRErr(err)\n\t\t\t}\n\t\t\ttagCfgStr := base64.StdEncoding.EncodeToString(tagCfg)\n\t\t\terr = c.BucketMetaHook(ctx, madmin.SRBucketMeta{\n\t\t\t\tType: madmin.SRBucketMetaTypeTags,\n\t\t\t\tBucket: bucket,\n\t\t\t\tTags: &tagCfgStr,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRBucketMetaError(err)\n\t\t\t}\n\t\t}\n\n\t\t// Replicate object-lock config if present.\n\t\tobjLockCfg, err := globalBucketMetadataSys.GetObjectLockConfig(bucket)\n\t\tfound = true\n\t\tif _, ok := err.(BucketObjectLockConfigNotFound); ok {\n\t\t\tfound = false\n\t\t} else if err != 
nil {\n\t\t\treturn errSRBackendIssue(err)\n\t\t}\n\t\tif found {\n\t\t\tobjLockCfgData, err := xml.Marshal(objLockCfg)\n\t\t\tif err != nil {\n\t\t\t\treturn wrapSRErr(err)\n\t\t\t}\n\t\t\tobjLockStr := base64.StdEncoding.EncodeToString(objLockCfgData)\n\t\t\terr = c.BucketMetaHook(ctx, madmin.SRBucketMeta{\n\t\t\t\tType: madmin.SRBucketMetaTypeObjectLockConfig,\n\t\t\t\tBucket: bucket,\n\t\t\t\tTags: &objLockStr,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRBucketMetaError(err)\n\t\t\t}\n\t\t}\n\n\t\t// Replicate existing bucket bucket encryption settings\n\t\tsseConfig, err := globalBucketMetadataSys.GetSSEConfig(bucket)\n\t\tfound = true\n\t\tif _, ok := err.(BucketSSEConfigNotFound); ok {\n\t\t\tfound = false\n\t\t} else if err != nil {\n\t\t\treturn errSRBackendIssue(err)\n\t\t}\n\t\tif found {\n\t\t\tsseConfigData, err := xml.Marshal(sseConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn wrapSRErr(err)\n\t\t\t}\n\t\t\tsseConfigStr := base64.StdEncoding.EncodeToString(sseConfigData)\n\t\t\terr = c.BucketMetaHook(ctx, madmin.SRBucketMeta{\n\t\t\t\tType: madmin.SRBucketMetaTypeSSEConfig,\n\t\t\t\tBucket: bucket,\n\t\t\t\tSSEConfig: &sseConfigStr,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRBucketMetaError(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t{\n\t\t// Replicate IAM policies on local to all peers.\n\t\tallPolicies, err := globalIAMSys.ListPolicies(ctx, \"\")\n\t\tif err != nil {\n\t\t\treturn errSRBackendIssue(err)\n\t\t}\n\n\t\tfor pname, policy := range allPolicies {\n\t\t\tpolicyJSON, err := json.Marshal(policy)\n\t\t\tif err != nil {\n\t\t\t\treturn wrapSRErr(err)\n\t\t\t}\n\t\t\terr = c.IAMChangeHook(ctx, madmin.SRIAMItem{\n\t\t\t\tType: madmin.SRIAMItemPolicy,\n\t\t\t\tName: pname,\n\t\t\t\tPolicy: policyJSON,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRIAMError(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t{\n\t\t// Replicate policy mappings on local to all peers.\n\t\tuserPolicyMap := make(map[string]MappedPolicy)\n\t\tgroupPolicyMap := 
make(map[string]MappedPolicy)\n\t\tglobalIAMSys.store.rlock()\n\t\terrU := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)\n\t\terrG := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)\n\t\tglobalIAMSys.store.runlock()\n\t\tif errU != nil {\n\t\t\treturn errSRBackendIssue(errU)\n\t\t}\n\t\tif errG != nil {\n\t\t\treturn errSRBackendIssue(errG)\n\t\t}\n\n\t\tfor user, mp := range userPolicyMap {\n\t\t\terr := c.IAMChangeHook(ctx, madmin.SRIAMItem{\n\t\t\t\tType: madmin.SRIAMItemPolicyMapping,\n\t\t\t\tPolicyMapping: &madmin.SRPolicyMapping{\n\t\t\t\t\tUserOrGroup: user,\n\t\t\t\t\tIsGroup: false,\n\t\t\t\t\tPolicy: mp.Policies,\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRIAMError(err)\n\t\t\t}\n\t\t}\n\n\t\tfor group, mp := range groupPolicyMap {\n\t\t\terr := c.IAMChangeHook(ctx, madmin.SRIAMItem{\n\t\t\t\tType: madmin.SRIAMItemPolicyMapping,\n\t\t\t\tPolicyMapping: &madmin.SRPolicyMapping{\n\t\t\t\t\tUserOrGroup: group,\n\t\t\t\t\tIsGroup: true,\n\t\t\t\t\tPolicy: mp.Policies,\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRIAMError(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t{\n\t\t// Check for service accounts and replicate them. 
Only LDAP user\n\t\t// owned service accounts are supported for this operation.\n\t\tserviceAccounts := make(map[string]auth.Credentials)\n\t\tglobalIAMSys.store.rlock()\n\t\terr := globalIAMSys.store.loadUsers(ctx, svcUser, serviceAccounts)\n\t\tglobalIAMSys.store.runlock()\n\t\tif err != nil {\n\t\t\treturn errSRBackendIssue(err)\n\t\t}\n\t\tfor user, acc := range serviceAccounts {\n\t\t\tclaims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, acc.AccessKey)\n\t\t\tif err != nil {\n\t\t\t\treturn errSRBackendIssue(err)\n\t\t\t}\n\t\t\tif claims != nil {\n\t\t\t\tif _, isLDAPAccount := claims[ldapUserN]; !isLDAPAccount {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.AccessKey)\n\t\t\tif err != nil {\n\t\t\t\treturn errSRBackendIssue(err)\n\t\t\t}\n\t\t\tvar policyJSON []byte\n\t\t\tif policy != nil {\n\t\t\t\tpolicyJSON, err = json.Marshal(policy)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn wrapSRErr(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = c.IAMChangeHook(ctx, madmin.SRIAMItem{\n\t\t\t\tType: madmin.SRIAMItemSvcAcc,\n\t\t\t\tSvcAccChange: &madmin.SRSvcAccChange{\n\t\t\t\t\tCreate: &madmin.SRSvcAccCreate{\n\t\t\t\t\t\tParent: acc.ParentUser,\n\t\t\t\t\t\tAccessKey: user,\n\t\t\t\t\t\tSecretKey: acc.SecretKey,\n\t\t\t\t\t\tGroups: acc.Groups,\n\t\t\t\t\t\tClaims: claims,\n\t\t\t\t\t\tSessionPolicy: json.RawMessage(policyJSON),\n\t\t\t\t\t\tStatus: acc.Status,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errSRIAMError(err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (b *Bolt) bucket(bucketName string) error {\n\n\treturn b.db.Update(func(tx *bolt.Tx) error {\n\t\tif _, err := tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}", "func syncBlobInfoToProject(info *util.BlobInfo) error {\n\t_, blob, err := dao.GetOrCreateBlob(&models.Blob{\n\t\tDigest: info.Digest,\n\t\tContentType: info.ContentType,\n\t\tSize: 
info.Size,\n\t\tCreationTime: time.Now(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := dao.AddBlobToProject(blob.ID, info.ProjectID); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Upload(w http.ResponseWriter, r *http.Request) {\n\t// Get uploaded file\n\tr.ParseMultipartForm(32 << 20)\n\tfile, _, err := r.FormFile(\"uploadFile\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\t// Get isPublic attribute\n\tIsPublicFromValue := r.FormValue(\"IsPublic\")\n\tif IsPublicFromValue == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tIsPublic, err := strconv.ParseBool(IsPublicFromValue)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Identify who the user is\n\tusername := r.Context().Value(\"username\")\n\tif username == nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Get user bucket id\n\tbucketID, err := GetUserGUID(username.(string))\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Generate a unique ID to identify the photo object\n\tphotoID := uuid.New().String()\n\n\t// Register photo in photos table\n\tphoto := Photo{\n\t\tID: photoID,\n\t\tIsPublic: IsPublic,\n\t\tUserID: *bucketID,\n\t}\n\tDB.Create(&photo)\n\n\t// Retrieve user's bucket\n\tbkt := Client.Bucket(getBucketForPhoto(photo))\n\n\t// Verify existence of bucket\n\t// Only run in production as Google Cloud Storage emulator for local development does not support metadata retrieval\n\t// TODO: Need more robust diaster recovery\n\tif !IsDebug {\n\t\t_, err = Client.Bucket(getBucketForPhoto(photo)).Attrs(r.Context())\n\t\tif err == storage.ErrBucketNotExist {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(\"Bucket does not exist: \" + err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Upload photo to bucket\n\tobj := 
bkt.Object(photoID)\n\tobjWriter := obj.NewWriter(r.Context())\n\tif _, err := io.Copy(objWriter, file); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif err := objWriter.Close(); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write([]byte(photoID))\n\tw.WriteHeader(http.StatusOK)\n}", "func (d *gcpVolDriver) syncWithHost() error {\n\tlog.Println(\"Synchronizing: load existing volumes into driver & Google Cloud Storage\")\n\t// get existing volumes defined for the driver\n\tvolumesNames, err := d.getVolumesFromHost()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, v := range volumesNames {\n\t\tlog.Printf(\"Synchronizing: existing volume '%s' found\\n\", v)\n\t\t// create a GCStorage bucket for that volume if not exist\n\t\tbucketName, err := d.handleCreateGCStorageBucket(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// add this volume to the driver's in-memory map of volumes\n\t\td.mountedBuckets[v] = &gcsVolumes{\n\t\t\tvolume: &volume.Volume{\n\t\t\t\tName: v,\n\t\t\t\tMountpoint: filepath.Join(d.driverRootDir, v, \"_data\"),\n\t\t\t},\n\t\t\tgcsBucketName: bucketName,\n\t\t\tcleanCloud: true,\n\t\t}\n\t}\n\treturn nil\n}", "func (is *ImageStoreLocal) FinishBlobUpload(repo, uuid string, body io.Reader, dstDigest godigest.Digest) error {\n\tif err := dstDigest.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tsrc := is.BlobUploadPath(repo, uuid)\n\n\t_, err := os.Stat(src)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", src).Msg(\"failed to stat blob\")\n\n\t\treturn zerr.ErrUploadNotFound\n\t}\n\n\tblobFile, err := os.Open(src)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", src).Msg(\"failed to open blob\")\n\n\t\treturn zerr.ErrUploadNotFound\n\t}\n\n\tdefer blobFile.Close()\n\n\tdigester := sha256.New()\n\n\t_, err = io.Copy(digester, blobFile)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"repository\", repo).Str(\"blob\", 
src).Str(\"digest\", dstDigest.String()).\n\t\t\tMsg(\"unable to compute hash\")\n\n\t\treturn err\n\t}\n\n\tsrcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf(\"%x\", digester.Sum(nil)))\n\n\tif srcDigest != dstDigest {\n\t\tis.log.Error().Str(\"srcDigest\", srcDigest.String()).\n\t\t\tStr(\"dstDigest\", dstDigest.String()).Msg(\"actual digest not equal to expected digest\")\n\n\t\treturn zerr.ErrBadBlobDigest\n\t}\n\n\tdir := path.Join(is.rootDir, repo, \"blobs\", dstDigest.Algorithm().String())\n\n\tvar lockLatency time.Time\n\n\tis.Lock(&lockLatency)\n\tdefer is.Unlock(&lockLatency)\n\n\terr = ensureDir(dir, is.log)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Msg(\"error creating blobs/sha256 dir\")\n\n\t\treturn err\n\t}\n\n\tdst := is.BlobPath(repo, dstDigest)\n\n\tif is.dedupe && fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\terr = is.DedupeBlob(src, dstDigest, dst)\n\t\tif err := inject.Error(err); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to dedupe blob\")\n\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := os.Rename(src, dst); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to finish blob\")\n\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RegisterHandler registers a file descriptor with the Poller and returns a Pollable which can be used for reading/writing as well as readiness notification. File descriptors registered with the poller will be placed into nonblocking mode.
func (p *Poller) RegisterHandler(fd uintptr, h EventHandler, data interface{}) (*Pollable, error) { if err := syscall.SetNonblock(int(fd), true); err != nil { return nil, err } return p.register(fd, h, data) }
[ "func NewPoller(l logr.Logger, n notifier.Notifier, e <-chan string, fAddr string, pi int, pt int) (*Poller, error) {\n\tfluxURL, err := url.Parse(fmt.Sprintf(\"http://%v/api/flux\", fAddr))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Poller{\n\t\tLog: l,\n\t\tEvents: e,\n\t\tNotifier: n,\n\t\tInterval: pi,\n\t\tTimeout: pt,\n\t\tClient: client.New(http.DefaultClient, transport.NewAPIRouter(), fluxURL.String(), \"\"),\n\n\t\twg: sync.WaitGroup{},\n\t\tquit: make(chan struct{}),\n\t}, nil\n}", "func (s *Config) RegisterPollHandler(pollHandler func(msg *sqs.Message)) {\n\ts.pollHandler = pollHandler\n}", "func NewPoller() (*Epoll, error) {\n\treturn NewPollerWithBuffer(128)\n}", "func (h *Handler) AddPoller(c echo.Context) error {\n\tid := c.Param(\"id\")\n\trequest := &PollerRequest{}\n\tvar err error\n\tif err = c.Bind(request); err != nil {\n\t\treturn err\n\t}\n\tp := &poller.Poller{}\n\tp.UUID = uuid.NewV4().String()\n\tp.Action = request.Action\n\tp.Driver = &particleio.ParticleIO{\n\t\tUUID: p.UUID,\n\t\tDeviceID: request.DeviceID,\n\t\tAccessToken: request.AccessToken,\n\t}\n\tp.PollInterval, err = time.ParseDuration(request.PollInterval)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.IsPolling = request.IsPolling\n\tp.User = id\n\n\tdb := h.DB.Clone()\n\tdefer db.Close()\n\n\tif err := db.DB(\"oxylus\").C(\"pollers\").Insert(&p); err != nil {\n\t\treturn err\n\t}\n\t// if ispolling then send the poller to the registry\n\t// turn this into a channel\n\th.PollerRegistry.Add(p.UUID, p)\n\treturn c.NoContent(http.StatusCreated)\n}", "func (w *filePoller) Add(name string) error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tif w.closed {\n\t\treturn errPollerClosed\n\t}\n\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfi, err := os.Stat(name)\n\tif err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\n\tif w.watches == nil {\n\t\tw.watches = make(map[string]chan struct{})\n\t}\n\tif _, exists := w.watches[name]; exists 
{\n\t\tf.Close()\n\t\treturn fmt.Errorf(\"watch exists\")\n\t}\n\tchClose := make(chan struct{})\n\tw.watches[name] = chClose\n\n\tgo w.watch(f, fi, chClose)\n\treturn nil\n}", "func NewPoller(poll PollerFunc, interval time.Duration) *Poller {\n\treturn &Poller{\n\t\tChannel: make(chan interface{}),\n\t\tPoll: poll,\n\t\tWaitInterval: interval,\n\t\tisStopped: false,\n\t\tisFinished: false,\n\t\tgroup: &sync.WaitGroup{},\n\t\tstopMutex: &sync.Mutex{},\n\t}\n}", "func NewPoller(getFunc GetFunc, period time.Duration, store Store) *Poller {\n\treturn &Poller{\n\t\tgetFunc: getFunc,\n\t\tperiod: period,\n\t\tstore: store,\n\t}\n}", "func Watch(specfile, dir string, fallback http.Handler, stderr io.Writer) *Handler {\n\tif stderr == nil {\n\t\tstderr = ioutil.Discard\n\t}\n\th := &Handler{\n\t\tspecfile: specfile,\n\t\tdir: dir,\n\t\tfallback: fallback,\n\t\tstderr: stderr,\n\t\tlogger: log.New(stderr, \"\", log.LstdFlags),\n\t}\n\tgo func() {\n\t\th.mend()\n\t\ttime.AfterFunc(time.Second/5, func() {\n\t\t\th.OnChange()\n\t\t})\n\t\th.watch()\n\t}()\n\treturn h\n}", "func NewPoller() *Poller {\n\treturn &Poller{}\n}", "func (s *GRPCGateway) RegisterHandler(ctx context.Context, f func(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error)) error {\n\tif err := f(ctx, s.gwmux, s.address, []grpc.DialOption{grpc.WithInsecure()}); err != nil {\n\t\treturn errors.Wrap(err, \"RegisterHandler\")\n\t}\n\treturn nil\n}", "func (c *Client) RegisterHandler(n string, h EventHandler) {\n var handlers []EventHandler\n\n reg, ok := c.Subscribers.Load(n)\n\n if ok {\n handlers = reg.([]EventHandler)\n }\n\n handlers = append(handlers, h)\n c.Subscribers.Store(n, handlers)\n}", "func New() (*Poller, error) {\n\tp, err := newEpoll()\n\treturn &Poller{poller: p}, err\n}", "func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),\n) (w *Watcher, err error) {\n\tfd, err := syscall.InotifyInit()\n\tif err != nil 
{\n\t\treturn\n\t}\n\tif fd == -1 {\n\t\terr = os.NewSyscallError(\"inotify_init\", err)\n\t\treturn\n\t}\n\tif useNonBlock {\n\t\tsyscall.SetNonblock(fd, true)\n\t}\n\tw = &Watcher{\n\t\tfd: fd,\n\t\tfn: fn,\n\t\tev: make(chan []*WatchEvent, bufsize),\n\t\twds: make(map[int32]string),\n\t\tflags: make(map[string]uint32),\n\t\tsl: sleepTime,\n\t\tsysbufsize: sysBufSize,\n\t}\n\tgo w.readEvents()\n\tgo w.handleEvents()\n\treturn\n}", "func NewPoller() *Poller {\n\treturn &Poller{\n\t\tTimeout: 3 * time.Second,\n\t\tTimeoutRetries: 2,\n\t}\n}", "func NewFilePoller(ctx context.Context, f FileChannel, pollTimeout time.Duration) *FilePoller {\n\treturn &FilePoller{File: f, ctx: ctx, pollTimeout: pollTimeout}\n}", "func (l *Loader) NewWatcher(done <-chan struct{}) <-chan error {\n\tif !l.watcher {\n\t\treturn nil\n\t}\n\tupdate := make(chan error)\n\tw := watcher{}\n\tgo w.watcher(l.filename, done, update)\n\treturn update\n}", "func NewWatcher(file string, deadTime time.Duration) (*Watcher, error) {\n\tfileToWatch, err := filepath.Abs(file)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't get absolute path of file. \" + err.Error())\n\t}\n\tdirectoryToWatch := filepath.Dir(fileToWatch)\n\n\tw := Watcher{\n\t\tC: make(chan struct{}),\n\t\tstopChan: make(chan struct{}),\n\t}\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, errors.New(\"Couldn't establish watcher. \" + err.Error())\n\t}\n\n\tgo func() {\n\t\tt := time.NewTimer(deadTime)\n\t\ttimerRunning := true\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\t// Received an event. 
Check it's for our file.\n\t\t\t\teventFile, evErr := filepath.Abs(event.Name)\n\t\t\t\tif evErr != nil || eventFile != fileToWatch {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// It's for our file so stop and restart the timer.\n\t\t\t\tif timerRunning {\n\t\t\t\t\tif !t.Stop() {\n\t\t\t\t\t\t// empty the timer chan if we failed to stop it\n\t\t\t\t\t\t<-t.C\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tt.Reset(deadTime)\n\t\t\t\ttimerRunning = true\n\t\t\tcase watcherErr := <-watcher.Errors:\n\t\t\t\tlog.Println(\"Throttled Watcher error:\", watcherErr)\n\t\t\tcase <-t.C:\n\t\t\t\ttimerRunning = false\n\t\t\t\tw.C <- struct{}{}\n\t\t\tcase <-w.stopChan:\n\t\t\t\tif timerRunning {\n\t\t\t\t\tt.Stop()\n\t\t\t\t}\n\t\t\t\twatcher.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = watcher.Add(directoryToWatch)\n\tif err != nil {\n\t\tw.stopChan <- struct{}{}\n\t\treturn nil, errors.New(\"Couldn't watch directory. \" + err.Error())\n\t}\n\n\treturn &w, nil\n\n}", "func RegisterHandler() {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\t<-c\n\t\tdoneMu.Lock()\n\t\tdone = true\n\t\tdoneMu.Unlock()\n\t}()\n}", "func StartPoller() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\t// receive and write a message from the queue\n\t\t\tcase queueItem := <-logQueue:\n\t\t\t\tperformWrite(queueItem)\n\n\t\t\t\t// receive and write a message from the queue\n\t\t\tcase queueItem := <-logQueueBuffer:\n\t\t\t\tperformWrite(queueItem)\n\n\t\t\t\t// stop polling for logs to write\n\t\t\tcase <-exitCh:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetInstNameField returns the inst name field
func GetInstNameField(objID string) string { switch objID { case BKInnerObjIDApp: return BKAppNameField case BKInnerObjIDSet: return BKSetNameField case BKInnerObjIDModule: return BKModuleNameField case BKInnerObjIDObject: return BKInstNameField case BKInnerObjIDHost: return BKHostNameField case BKInnerObjIDProc: return BKProcNameField case BKInnerObjIDPlat: return BKCloudNameField case BKTableNameInstAsst: return BKFieldID default: if IsObjectInstAsstShardingTable(objID) { return BKFieldID } return BKInstNameField } }
[ "func (addenda10 *Addenda10) NameField() string {\n\treturn addenda10.alphaField(addenda10.Name, 35)\n}", "func (o *SingleSelectFieldField) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (o *LinkRowFieldField) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (f *Field) Name() string {\n\tjsonTag := reflect.StructTag(f.Tag.Value[1 : len(f.Tag.Value)-1]).Get(\"json\") // Delete first and last quotation\n\tjsonTag = strings.Split(jsonTag, \",\")[0] // This can return \"-\"\n\tif jsonTag != \"\" {\n\t\treturn jsonTag\n\t}\n\n\tif f.Names != nil {\n\t\treturn f.Names[0].Name\n\t}\n\n\treturn f.Type.(*ast.Ident).Name\n}", "func (f StandInstDbNameField) Tag() quickfix.Tag { return tag.StandInstDbName }", "func (f BinaryField) GetName() string {\n\treturn f.name\n}", "func fieldname(field *ast.Field) string {\n\treturn field.Names[0].Name\n}", "func (f *Field) Name() string {\n\treturn f.field.Name\n}", "func (field Field) Name() string {\n\tif len(field.Names) > 0 {\n\t\treturn field.Names[0].String()\n\t}\n\n\t// The field has no name, so we use Type name as the field name.\n\treturn itemTypeName(field.TypeValue.Type).Name\n}", "func getInstanceName(infra infraObject) string {\n\t// cloud-init will split the hostname on '.' and set the hostname to the first chunk. This causes an issue where all\n\t// nodes in a machine pool may have the same node name in Kubernetes. Converting the '.' 
to '-' here prevents this.\n\tinstanceName := strings.ReplaceAll(infra.meta.GetName(), \".\", \"-\")\n\tinstanceName = name2.SafeConcatName(instanceName)\n\n\treturn instanceName\n}", "func (c *Checker) getFieldName(field reflect.StructField) string {\n\tname := field.Name\n\tif c.JSONTag != nil {\n\t\tif val, ok := field.Tag.Lookup(\"json\"); ok {\n\t\t\tname = strings.Split(val, \",\")[0]\n\t\t}\n\t}\n\tif name == \"-\" {\n\t\tif !c.JSONTag.IgnoreDashFields {\n\t\t\tname = field.Name\n\t\t}\n\t}\n\treturn name\n}", "func (c *STableField) Name() string {\n\tif len(c.alias) > 0 {\n\t\treturn c.alias\n\t}\n\treturn c.spec.Name()\n}", "func (d Digest) GetInstanceName() InstanceName {\n\t_, _, _, sizeBytesEnd := d.unpack()\n\treturn InstanceName{\n\t\tvalue: d.value[sizeBytesEnd+1:],\n\t}\n}", "func (e *Encoder) getFieldName(field reflect.StructField) string {\n\tif e.useTags {\n\t\tname := field.Tag.Get(e.tag)\n\t\t// skip columns tagged with -\n\t\tif name == \"-\" {\n\t\t\treturn \"\"\n\t\t}\n\t\tif name != \"\" {\n\t\t\treturn name\n\t\t}\n\t}\n\treturn field.Name\n\n}", "func name(v reflect.StructField) string {\n\tif name, ok := v.Tag.Lookup(\"name\"); ok {\n\t\treturn name\n\t}\n\treturn v.Name\n}", "func (t *SentryTaggedStruct) GetName() string {\n\treturn \"\"\n}", "func (s *MyStruct) Name() string {\n\treturn s.field_Name\n}", "func (f *DialectMessageField) GetName() string {\n\treturn f.name\n}", "func NewStandInstDbName(val string) StandInstDbNameField {\n\treturn StandInstDbNameField{quickfix.FIXString(val)}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetInstIDField get primary key of object's collection/table
func GetInstIDField(objType string) string { switch objType { case BKInnerObjIDApp: return BKAppIDField case BKInnerObjIDSet: return BKSetIDField case BKInnerObjIDModule: return BKModuleIDField case BKInnerObjIDObject: return BKInstIDField case BKInnerObjIDHost: return BKHostIDField case BKInnerObjIDProc: return BKProcIDField case BKInnerObjIDPlat: return BKCloudIDField case BKTableNameInstAsst: return BKFieldID case BKTableNameServiceInstance: return BKFieldID case BKTableNameServiceTemplate: return BKFieldID case BKTableNameProcessTemplate: return BKFieldID case BKTableNameProcessInstanceRelation: return BKProcessIDField default: if IsObjectInstAsstShardingTable(objType) { return BKFieldID } return BKInstIDField } }
[ "func hostGetObjectId(objId int32, keyId int32, typeId int32) int32", "func getPKID(c *gin.Context) (models.PKID, error) {\n\tid := c.Param(\"id\")\n\tpkid, err := strconv.Atoi(id)\n\tif err != nil {\n\t\treturn 0, apperrors.NewNotFound(errors.New(\"Resource not found\"))\n\t}\n\treturn models.PKID(pkid), err\n}", "func (p *Pet) GetPrimaryKeyAddress() interface{} {\n\treturn &p.ID\n}", "func (_this *IDBCursor) PrimaryKey() js.Value {\n\tvar ret js.Value\n\tvalue := _this.Value_JS.Get(\"primaryKey\")\n\tret = value\n\treturn ret\n}", "func getKeyByID(c *gin.Context, id string) (error, *models.Key) {\n\tdatabase := c.MustGet(\"db\").(*mgo.Database)\n\toID := bson.ObjectIdHex(id)\n\tkey := models.Key{}\n\terr := database.C(models.CollectionKey).FindId(oID).One(&key)\n\tif err != nil {\n\t\treturn err, nil\n\t}\n\n\treturn nil, &key\n}", "func (c *DefaultConstr) GetPrimaryKey(conn *sql.DB, schema string, tableName string) ([]string, error) {\n\treturn nil, ErrorNotSupport\n}", "func (ti *TableInfo) GetPrimaryKey() *FieldConstraint {\n\tfor _, f := range ti.FieldConstraints {\n\t\tif f.IsPrimaryKey {\n\t\t\treturn f\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *Table) GetPrimaryKey() error {\n\n\tsql := `SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ? and TABLE_name = ? 
AND COLUMN_KEY = 'PRI'`\n\n\trows, err := t.db.Query(sql, t.schema, t.table)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"db.Query failed while trying to get primary key: %v\", err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&t.pkCol, &t.pkType)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"rows.Scan failed while trying to get primary key: %v\", err)\n\t\t}\n\t\t// Missing: check data_type in\n\t\t// Missing: check one row back\n\t\tbreak\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"rows.Err() is set when getting primary key: %v\", err)\n\t}\n\treturn nil\n}", "func (k *Key) IntID() int64 { return k.toks[len(k.toks)-1].IntID }", "func (t *badgerTableVersion) getObjKey(id, objID []byte) []byte {\n\tprefix := []byte(t.prefix + \"object/\")\n\tprefix = append(prefix, id...)\n\tprefix = append(prefix, '/')\n\n\treturn append(prefix, objID...)\n}", "func (fieldType *AdaReferentialType) PrimaryKeyName() string {\n\tif fieldType.keys[0] == \"\" {\n\t\treturn \"ISN\"\n\t}\n\treturn fieldType.keys[0]\n}", "func (d Document) PrimaryField() string {\n\tif fields := d.PrimaryFields(); len(fields) == 1 {\n\t\treturn fields[0]\n\t}\n\n\tpanic(\"rel: composite primary key is not supported\")\n}", "func (m *Model) GetPK() string {\n\treturn m.PrimaryKey\n}", "func (p *CockroachDriver) PrimaryKeyInfo(schema, tableName string) (*bdb.PrimaryKey, error) {\n\tPrintName(\"PrimaryKeyInfo\")\n\tpkey := &bdb.PrimaryKey{}\n\tvar err error\n\n\tquery := `\n\tselect tc.constraint_name\n\tfrom ` + schema + `.rveg_primary_keys as tc\n\twhere tc.table_name = $1 and tc.table_schema = $2\n\t;\n\t`\n\n\trow := p.dbConn.QueryRow(query, tableName, schema)\n\n\tif err = row.Scan(&pkey.Name); err != nil {\n\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tqueryColumns := `\n\tselect kcu.column_name\n\tfrom information_schema.key_column_usage as kcu\n\twhere constraint_name = $1\n\t\tand table_schema = 
$2\n\tlimit 1\n\t;`\n\n\tvar rows *sql.Rows\n\tif rows, err = p.dbConn.Query(queryColumns, pkey.Name, schema); err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\tvar col = \"\"\n\tvar columns []string\n\tvar xC = 0\n\n\tfor rows.Next() {\n\t\tvar column string\n\n\t\terr = rows.Scan(&column)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif col == \"\" {\n\t\t\tcol = column\n\t\t}\n\t\tif column == \"id\" {\n\t\t\txC++\n\t\t}\n\t\tif column != col {\n\t\t\tif xC > 0 {\n\t\t\t\tcolumns = append(columns, column)\n\t\t\t}\n\t\t}\n\t\tcol = column\n\t}\n\tcolumns = append(columns, col)\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpkey.Columns = columns\n\n\treturn pkey, nil\n}", "func (dd *DatabaseDiff) ObjectKey() ObjectKey {\n\tif dd == nil || (dd.From == nil && dd.To == nil) {\n\t\treturn ObjectKey{}\n\t}\n\tif dd.From == nil {\n\t\treturn dd.To.ObjectKey()\n\t}\n\treturn dd.From.ObjectKey()\n}", "func (self *Devices) GetPrimaryKeyValue() int64 {\n return self.Id\n}", "func getRefModelPk(field modelField) *int64 {\n\tif field.value.IsNil() {\n\t\treturn nil\n\t}\n\tmi, err := getModelInfo(field.value.Interface())\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, field := range mi.fields {\n\t\tif isPkField(field) {\n\t\t\tif !isZeroField(field.value) {\n\t\t\t\tif field.value.Kind() == reflect.Int64 {\n\t\t\t\t\treturn field.value.Addr().Interface().(*int64)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (m *TestModel) PrimaryKey() string {\n\treturn m.ID\n}", "func (t table) idKey(id schema.Key) string {\n\treturn fmt.Sprintf(\"%s:%s\", t.desc.Name, id)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsInnerMainlineModel judge if the object type is inner mainline model
func IsInnerMainlineModel(objType string) bool { switch objType { case BKInnerObjIDApp, BKInnerObjIDSet, BKInnerObjIDModule: return true default: return false } }
[ "func (t Type) IsModel() bool {\n\t_, isModel := t.impl.(*modelImpl)\n\treturn isModel\n}", "func (ref *UIElement) IsMain() bool {\n\tret, _ := ref.BoolAttr(MainAttribute)\n\treturn ret\n}", "func (c Category) IsMainSet() bool {\n\treturn c.Main != 0\n}", "func (gt *GoTezos) IsMainnet() bool {\n\tif len(gt.Versions) > 0 {\n\t\treturn gt.Versions[0].Network == \"BETANET\"\n\t}\n\treturn false\n}", "func (o *LinkRouteTable) GetMainOk() (*bool, bool) {\n\tif o == nil || o.Main == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Main, true\n}", "func (bd *BlockDAG) isOnMainChain(h *hash.Hash) bool {\n\treturn bd.instance.IsOnMainChain(bd.getBlock(h))\n}", "func (a Ad) IsInline() bool {\n\treturn a.InLine != nil\n}", "func (s *BasePlSqlParserListener) EnterMain_model(ctx *Main_modelContext) {}", "func (meta MVCCMetadata) IsInline() bool {\n\treturn meta.RawBytes != nil\n}", "func (o *LinkRouteTable) HasMain() bool {\n\tif o != nil && o.Main != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (t *Thread) IsMain() bool {\n\treturn t == t.mainThread\n}", "func isRoot(e *yang.Entry) bool {\n\treturn e.Parent == nil\n}", "func (o *LinkRouteTable) GetMain() bool {\n\tif o == nil || o.Main == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.Main\n}", "func (o *ForecastModelAllOf) HasModelType() bool {\n\tif o != nil && o.ModelType != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (con *Conflux) IsOnMainChain(b IBlock) bool {\n\tfor p := con.privotTip; p != nil; p = con.bd.getBlockById(p.GetMainParent()) {\n\t\tif p.GetHash().IsEqual(b.GetHash()) {\n\t\t\treturn true\n\t\t}\n\t\tif p.GetLayer() < b.GetLayer() {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn false\n}", "func (o *ConvergedinfraServerComplianceDetailsAllOf) HasModel() bool {\n\tif o != nil && o.Model != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool {\n\treturn (obj.GetNamespace() == aiv.Namespace || 
obj.GetNamespace() == \"\") &&\n\t\tobj.GetName() == aiv.Name &&\n\t\tobj.GetObjectKind().GroupVersionKind().Group == aiv.Group &&\n\t\tobj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind\n}", "func (bd *BlockDAG) IsOnMainChain(h *hash.Hash) bool {\n\tbd.stateLock.Lock()\n\tdefer bd.stateLock.Unlock()\n\n\treturn bd.isOnMainChain(h)\n}", "func (e *entity) isNestedEmpty() bool {\n\tif len(e.fieldsByName) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, l := range e.fieldsByName {\n\t\tif !l.leader().isEmpty() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewNodeResourceTopologies makes a CRD golang object representing NodeResourceTopology definition
func NewNodeResourceTopologies() (*apiextensionsv1.CustomResourceDefinition, error) { _, file, _, ok := runtime.Caller(0) if !ok { return nil, fmt.Errorf("cannot retrieve manifests directory") } baseDir := filepath.Dir(file) crdPath := filepath.Clean(filepath.Join(baseDir, "..", "..", "..", "deployment", "base", "noderesourcetopologies-crd", "noderesourcetopologies.yaml")) data, err := os.ReadFile(crdPath) if err != nil { return nil, err } decode := scheme.Codecs.UniversalDeserializer().Decode obj, _, err := decode(data, nil, nil) if err != nil { return nil, err } crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition) if !ok { return nil, fmt.Errorf("unexpected type, got %t", obj) } return crd, nil }
[ "func CreateNodeResourceTopologies(ctx context.Context, extClient extclient.Interface) (*apiextensionsv1.CustomResourceDefinition, error) {\n\tcrd, err := NewNodeResourceTopologies()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Delete existing CRD (if any) with this we also get rid of stale objects\n\terr = extClient.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn nil, fmt.Errorf(\"failed to delete NodeResourceTopology CRD: %w\", err)\n\t}\n\n\t// It takes time for the delete operation, wait until the CRD completely gone\n\tif err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) {\n\t\t_, err = extClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{})\n\t\tif err == nil {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, err\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get NodeResourceTopology CRD: %w\", err)\n\t}\n\treturn extClient.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{})\n}", "func Topology(node uint32) (*TopologyConn, error) {\n\tsa := &unix.TIPCServiceName{\n\t\tType: unix.TIPC_TOP_SRV,\n\t\tInstance: unix.TIPC_TOP_SRV,\n\t\tDomain: node,\n\t}\n\n\tst := &unix.SockaddrTIPC{\n\t\tScope: unix.TIPC_CLUSTER_SCOPE,\n\t\tAddr: sa,\n\t}\n\n\tc, err := tipc.DialSequentialPacket(st)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TopologyConn{conn: c}, nil\n}", "func NewTopo(xwidth, yheight uint) Topo {\n\tt := make(Topo, xwidth)\n\tfor x := range t {\n\t\tt[x] = make([]float64, yheight)\n\t}\n\treturn t\n}", "func GetNodeTopology(ctx context.Context, topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {\n\tvar nodeTopology *v1alpha2.NodeResourceTopology\n\tvar err 
error\n\tgomega.EventuallyWithOffset(1, func() bool {\n\t\tnodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tframework.Logf(\"failed to get the node topology resource: %v\", err)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}, time.Minute, 5*time.Second).Should(gomega.BeTrue())\n\treturn nodeTopology\n}", "func IsValidNodeTopology(nodeTopology *v1alpha2.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool {\n\tif nodeTopology == nil || len(nodeTopology.TopologyPolicies) == 0 {\n\t\tframework.Logf(\"failed to get topology policy from the node topology resource\")\n\t\treturn false\n\t}\n\n\ttmPolicy := string(topologypolicy.DetectTopologyPolicy(kubeletConfig.TopologyManagerPolicy, kubeletConfig.TopologyManagerScope))\n\tif nodeTopology.TopologyPolicies[0] != tmPolicy {\n\t\tframework.Logf(\"topology policy mismatch got %q expected %q\", nodeTopology.TopologyPolicies[0], tmPolicy)\n\t\treturn false\n\t}\n\n\texpectedPolicyAttribute := v1alpha2.AttributeInfo{\n\t\tName: nfdtopologyupdater.TopologyManagerPolicyAttributeName,\n\t\tValue: kubeletConfig.TopologyManagerPolicy,\n\t}\n\tif !containsAttribute(nodeTopology.Attributes, expectedPolicyAttribute) {\n\t\tframework.Logf(\"topology policy attributes don't have correct topologyManagerPolicy attribute expected %v attributeList %v\", expectedPolicyAttribute, nodeTopology.Attributes)\n\t\treturn false\n\t}\n\n\texpectedScopeAttribute := v1alpha2.AttributeInfo{\n\t\tName: nfdtopologyupdater.TopologyManagerScopeAttributeName,\n\t\tValue: kubeletConfig.TopologyManagerScope,\n\t}\n\tif !containsAttribute(nodeTopology.Attributes, expectedScopeAttribute) {\n\t\tframework.Logf(\"topology policy attributes don't have correct topologyManagerScope attribute expected %v attributeList %v\", expectedScopeAttribute, nodeTopology.Attributes)\n\t\treturn false\n\t}\n\n\tif nodeTopology.Zones == nil || 
len(nodeTopology.Zones) == 0 {\n\t\tframework.Logf(\"failed to get topology zones from the node topology resource\")\n\t\treturn false\n\t}\n\n\tfoundNodes := 0\n\tfor _, zone := range nodeTopology.Zones {\n\t\t// TODO constant not in the APIs\n\t\tif !strings.HasPrefix(strings.ToUpper(zone.Type), \"NODE\") {\n\t\t\tcontinue\n\t\t}\n\t\tfoundNodes++\n\n\t\tif !isValidCostList(zone.Name, zone.Costs) {\n\t\t\tframework.Logf(\"invalid cost list for zone %q\", zone.Name)\n\t\t\treturn false\n\t\t}\n\n\t\tif !isValidResourceList(zone.Name, zone.Resources) {\n\t\t\tframework.Logf(\"invalid resource list for zone %q\", zone.Name)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn foundNodes > 0\n}", "func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha2.NodeResourceTopology) map[string]corev1.ResourceList {\n\tallocRes := make(map[string]corev1.ResourceList)\n\tfor _, zone := range nodeTopo.Zones {\n\t\tif zone.Type != \"Node\" {\n\t\t\tcontinue\n\t\t}\n\t\tresList := make(corev1.ResourceList)\n\t\tfor _, res := range zone.Resources {\n\t\t\tresList[corev1.ResourceName(res.Name)] = res.Allocatable.DeepCopy()\n\t\t}\n\t\tif len(resList) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tallocRes[zone.Name] = resList\n\t}\n\treturn allocRes\n}", "func newResource() *resource.Resource {\n\tr, _ := resource.Merge(\n\t\tresource.Default(),\n\t\tresource.NewWithAttributes(\n\t\t\tsemconv.SchemaURL,\n\t\t\tsemconv.ServiceNameKey.String(\"opentelemetry-server\"),\n\t\t\tsemconv.ServiceVersionKey.String(\"v0.1.0\"),\n\t\t\tattribute.String(\"environment\", \"demo\"),\n\t\t),\n\t)\n\treturn r\n}", "func NewTopology() Topology {\n\treturn &topologyS{\n\t\tTopology: &topology.Topo{},\n\t}\n}", "func (fm *FCFSModel)TaskNodeToResource(td *TaskDescriptor,rd *ResDescriptor)*ArcDescriptor{\n\t//ct := uint64(rand.Intn(100))\n\treturn &ArcDescriptor{\n\t\tcost: fm.TaskToResCost(td,rd),\n\t\tcapLower :0,\n\t\tcapUpper: 1,\n\t}\n}", "func NewResources(p fsm.ExecutorParams, operator ops.Operator) 
(*resourcesExecutor, error) {\n\tlogger := &fsm.Logger{\n\t\tFieldLogger: logrus.WithFields(logrus.Fields{\n\t\t\tconstants.FieldPhase: p.Phase.ID,\n\t\t}),\n\t\tKey: opKey(p.Plan),\n\t\tOperator: operator,\n\t\tServer: p.Phase.Data.Server,\n\t}\n\treturn &resourcesExecutor{\n\t\tFieldLogger: logger,\n\t\tExecutorParams: p,\n\t}, nil\n}", "func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int, chassisId string) error {\n\tnodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node)\n\tif err != nil || len(nodeTransitSwitchPortIPs) == 0 {\n\t\treturn fmt.Errorf(\"failed to get the node transit switch port Ips : %w\", err)\n\t}\n\n\tnetworkId, err := util.ParseNetworkIDAnnotation(node, zic.GetNetworkName())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get the network id for the network %s on node %s: %v\", zic.GetNetworkName(), node.Name, err)\n\t}\n\n\ttransitSwitchTunnelKey := BaseTransitSwitchTunnelKey + networkId\n\tts := &nbdb.LogicalSwitch{\n\t\tName: zic.networkTransitSwitchName,\n\t\tOtherConfig: map[string]string{\n\t\t\t\"interconn-ts\": zic.networkTransitSwitchName,\n\t\t\t\"requested-tnl-key\": strconv.Itoa(transitSwitchTunnelKey),\n\t\t\t\"mcast_snoop\": \"true\",\n\t\t\t\"mcast_flood_unregistered\": \"true\",\n\t\t},\n\t}\n\n\t// Create transit switch if it doesn't exist\n\tif err := libovsdbops.CreateOrUpdateLogicalSwitch(zic.nbClient, ts); err != nil {\n\t\treturn fmt.Errorf(\"failed to create/update transit switch %s: %w\", zic.networkTransitSwitchName, err)\n\t}\n\n\ttransitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP)\n\tvar transitRouterPortNetworks []string\n\tfor _, ip := range nodeTransitSwitchPortIPs {\n\t\ttransitRouterPortNetworks = append(transitRouterPortNetworks, ip.String())\n\t}\n\n\tremotePortAddr := transitRouterPortMac.String()\n\tfor _, tsNetwork := range transitRouterPortNetworks {\n\t\tremotePortAddr = remotePortAddr + \" \" + 
tsNetwork\n\t}\n\n\tlspOptions := map[string]string{\n\t\t\"requested-tnl-key\": strconv.Itoa(nodeID),\n\t}\n\t// Store the node name in the external_ids column for book keeping\n\texternalIDs := map[string]string{\n\t\t\"node\": node.Name,\n\t}\n\n\tremotePortName := zic.GetNetworkScopedName(types.TransitSwitchToRouterPrefix + node.Name)\n\tif err := zic.addNodeLogicalSwitchPort(zic.networkTransitSwitchName, remotePortName, lportTypeRemote, []string{remotePortAddr}, lspOptions, externalIDs); err != nil {\n\t\treturn err\n\t}\n\t// Set the port binding chassis.\n\tif err := zic.setRemotePortBindingChassis(node.Name, remotePortName, chassisId); err != nil {\n\t\treturn err\n\t}\n\n\tif err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs); err != nil {\n\t\treturn err\n\t}\n\n\t// Cleanup the logical router port connecting to the transit switch for the remote node (if present)\n\t// Cleanup would be required when a local zone node moves to a remote zone.\n\treturn zic.cleanupNodeClusterRouterPort(node.Name)\n}", "func newResourceReservation(driverNode string, executorNodes []string, driver *v1.Pod, driverResources, executorResources *resources.Resources) *v1beta1.ResourceReservation {\n\treservations := make(map[string]v1beta1.Reservation, len(executorNodes)+1)\n\treservations[\"driver\"] = v1beta1.Reservation{\n\t\tNode: driverNode,\n\t\tCPU: driverResources.CPU,\n\t\tMemory: driverResources.Memory,\n\t}\n\tfor idx, nodeName := range executorNodes {\n\t\treservations[executorReservationName(idx)] = v1beta1.Reservation{\n\t\t\tNode: nodeName,\n\t\t\tCPU: executorResources.CPU,\n\t\t\tMemory: executorResources.Memory,\n\t\t}\n\t}\n\treturn &v1beta1.ResourceReservation{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: driver.Labels[common.SparkAppIDLabel],\n\t\t\tNamespace: driver.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(driver, podGroupVersionKind)},\n\t\t\tLabels: 
map[string]string{\n\t\t\t\tv1beta1.AppIDLabel: driver.Labels[common.SparkAppIDLabel],\n\t\t\t},\n\t\t},\n\t\tSpec: v1beta1.ResourceReservationSpec{\n\t\t\tReservations: reservations,\n\t\t},\n\t\tStatus: v1beta1.ResourceReservationStatus{\n\t\t\tPods: map[string]string{\"driver\": driver.Name},\n\t\t},\n\t}\n}", "func newTopoClient(cfg ServiceConfig) (topoapi.TopoClient, error) {\n\topts := []grpc.DialOption{\n\t\tgrpc.WithStreamInterceptor(southbound.RetryingStreamClientInterceptor(100 * time.Millisecond)),\n\t}\n\tif cfg.Insecure {\n\t\topts = append(opts, grpc.WithInsecure())\n\t}\n\n\tconn, err := getTopoConn(\"onos-topo\", opts...)\n\tif err != nil {\n\t\tstat, ok := status.FromError(err)\n\t\tif ok {\n\t\t\tlog.Error(\"Unable to connect to topology service\", err)\n\t\t\treturn nil, errors.FromStatus(stat)\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn topoapi.NewTopoClient(conn), nil\n}", "func newTopologyTagger() Tagger {\n\treturn &topologyTagger{}\n}", "func createResources() templateResources {\n\tres := templateResources{\n\t\tVariables: make(map[string]interface{}),\n\t\tCluster: &kubermaticv1.Cluster{\n\t\t\tSpec: kubermaticv1.ClusterSpec{\n\t\t\t\tClusterNetwork: kubermaticv1.ClusterNetworkingConfig{\n\t\t\t\t\tPods: kubermaticv1.NetworkRanges{\n\t\t\t\t\t\tCIDRBlocks: []string{\n\t\t\t\t\t\t\t\"172.25.0.0/16\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tDNSClusterIP: \"1.2.3.4\",\n\t}\n\tres.Variables[\"NodeAccessNetwork\"] = \"172.26.0.0/16\"\n\treturn res\n}", "func (c *Client) Topology(ctx context.Context) (topology Topology, err error) {\n\tvar t debugapi.Topology\n\tfor r := 0; r < c.retry; r++ {\n\t\ttime.Sleep(2 * time.Duration(r) * time.Second)\n\n\t\tt, err = c.debug.Node.Topology(ctx)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif err != nil {\n\t\treturn Topology{}, fmt.Errorf(\"get topology: %w\", err)\n\t}\n\n\ttopology = Topology{\n\t\tOverlay: t.BaseAddr,\n\t\tConnected: 
t.Connected,\n\t\tPopulation: t.Population,\n\t\tNnLowWatermark: t.NnLowWatermark,\n\t\tDepth: t.Depth,\n\t\tBins: make(map[string]Bin),\n\t}\n\n\tfor k, b := range t.Bins {\n\t\tif b.Population > 0 {\n\t\t\ttopology.Bins[k] = Bin{\n\t\t\t\tConnected: b.Connected,\n\t\t\t\tConnectedPeers: b.ConnectedPeers,\n\t\t\t\tDisconnectedPeers: b.DisconnectedPeers,\n\t\t\t\tPopulation: b.Population,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func NewResourceNode(networkID string, pubKey crypto.PubKey, ownerAddr sdk.AccAddress,\n\tdescription Description, nodeType string, creationTime time.Time) ResourceNode {\n\treturn ResourceNode{\n\t\tNetworkID: networkID,\n\t\tPubKey: pubKey,\n\t\tSuspend: false,\n\t\tStatus: sdk.Unbonded,\n\t\tTokens: sdk.ZeroInt(),\n\t\tOwnerAddress: ownerAddr,\n\t\tDescription: description,\n\t\tNodeType: nodeType,\n\t\tCreationTime: creationTime,\n\t}\n}", "func NewResource(typ string) *Resource {\n\tswitch typ {\n\tcase \"networkmap\":\n\t\treturn &Resource{Data: &NetworkMap{Map: make(map[string]EndpointAddrGroup)}}\n\tcase \"costmap\":\n\t\treturn &Resource{Data: &CostMap{Map: make(map[string]DstCosts)}}\n\tdefault:\n\t\treturn &Resource{}\n\t}\n}", "func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.Node, nodeID int) error {\n\tnodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node)\n\tif err != nil || len(nodeTransitSwitchPortIPs) == 0 {\n\t\treturn fmt.Errorf(\"failed to get the node transit switch port ips for node %s: %w\", node.Name, err)\n\t}\n\n\ttransitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP)\n\tvar transitRouterPortNetworks []string\n\tfor _, ip := range nodeTransitSwitchPortIPs {\n\t\ttransitRouterPortNetworks = append(transitRouterPortNetworks, ip.String())\n\t}\n\n\t// Connect transit switch to the cluster router by creating a pair of logical switch port - logical router port\n\tlogicalRouterPortName := zic.GetNetworkScopedName(types.RouterToTransitSwitchPrefix + 
node.Name)\n\tlogicalRouterPort := nbdb.LogicalRouterPort{\n\t\tName: logicalRouterPortName,\n\t\tMAC: transitRouterPortMac.String(),\n\t\tNetworks: transitRouterPortNetworks,\n\t\tOptions: map[string]string{\n\t\t\t\"mcast_flood\": \"true\",\n\t\t},\n\t}\n\tlogicalRouter := nbdb.LogicalRouter{\n\t\tName: zic.networkClusterRouterName,\n\t}\n\n\tif err := libovsdbops.CreateOrUpdateLogicalRouterPort(zic.nbClient, &logicalRouter, &logicalRouterPort, nil); err != nil {\n\t\treturn fmt.Errorf(\"failed to create/update cluster router %s to add transit switch port %s for the node %s: %w\", zic.networkClusterRouterName, logicalRouterPortName, node.Name, err)\n\t}\n\n\tlspOptions := map[string]string{\n\t\t\"router-port\": logicalRouterPortName,\n\t\t\"requested-tnl-key\": strconv.Itoa(nodeID),\n\t}\n\n\t// Store the node name in the external_ids column for book keeping\n\texternalIDs := map[string]string{\n\t\t\"node\": node.Name,\n\t}\n\terr = zic.addNodeLogicalSwitchPort(zic.networkTransitSwitchName, zic.GetNetworkScopedName(types.TransitSwitchToRouterPrefix+node.Name),\n\t\tlportTypeRouter, []string{lportTypeRouterAddr}, lspOptions, externalIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Its possible that node is moved from a remote zone to the local zone. Check and delete the remote zone routes\n\t// for this node as it's no longer needed.\n\treturn zic.deleteLocalNodeStaticRoutes(node, nodeID, nodeTransitSwitchPortIPs)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateNodeResourceTopologies creates the NodeResourceTopology in the cluster if the CRD doesn't exists already. Returns the CRD golang object present in the cluster.
func CreateNodeResourceTopologies(ctx context.Context, extClient extclient.Interface) (*apiextensionsv1.CustomResourceDefinition, error) { crd, err := NewNodeResourceTopologies() if err != nil { return nil, err } // Delete existing CRD (if any) with this we also get rid of stale objects err = extClient.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { return nil, fmt.Errorf("failed to delete NodeResourceTopology CRD: %w", err) } // It takes time for the delete operation, wait until the CRD completely gone if err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) { _, err = extClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd.Name, metav1.GetOptions{}) if err == nil { return false, nil } if errors.IsNotFound(err) { return true, nil } return false, err }); err != nil { return nil, fmt.Errorf("failed to get NodeResourceTopology CRD: %w", err) } return extClient.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{}) }
[ "func NewNodeResourceTopologies() (*apiextensionsv1.CustomResourceDefinition, error) {\n\t_, file, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot retrieve manifests directory\")\n\t}\n\n\tbaseDir := filepath.Dir(file)\n\tcrdPath := filepath.Clean(filepath.Join(baseDir, \"..\", \"..\", \"..\", \"deployment\", \"base\", \"noderesourcetopologies-crd\", \"noderesourcetopologies.yaml\"))\n\n\tdata, err := os.ReadFile(crdPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdecode := scheme.Codecs.UniversalDeserializer().Decode\n\tobj, _, err := decode(data, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcrd, ok := obj.(*apiextensionsv1.CustomResourceDefinition)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected type, got %t\", obj)\n\t}\n\treturn crd, nil\n}", "func GetNodeTopology(ctx context.Context, topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {\n\tvar nodeTopology *v1alpha2.NodeResourceTopology\n\tvar err error\n\tgomega.EventuallyWithOffset(1, func() bool {\n\t\tnodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tframework.Logf(\"failed to get the node topology resource: %v\", err)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}, time.Minute, 5*time.Second).Should(gomega.BeTrue())\n\treturn nodeTopology\n}", "func createResources() templateResources {\n\tres := templateResources{\n\t\tVariables: make(map[string]interface{}),\n\t\tCluster: &kubermaticv1.Cluster{\n\t\t\tSpec: kubermaticv1.ClusterSpec{\n\t\t\t\tClusterNetwork: kubermaticv1.ClusterNetworkingConfig{\n\t\t\t\t\tPods: kubermaticv1.NetworkRanges{\n\t\t\t\t\t\tCIDRBlocks: []string{\n\t\t\t\t\t\t\t\"172.25.0.0/16\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tDNSClusterIP: \"1.2.3.4\",\n\t}\n\tres.Variables[\"NodeAccessNetwork\"] = \"172.26.0.0/16\"\n\treturn res\n}", "func NewResourceNode(networkID 
string, pubKey crypto.PubKey, ownerAddr sdk.AccAddress,\n\tdescription Description, nodeType string, creationTime time.Time) ResourceNode {\n\treturn ResourceNode{\n\t\tNetworkID: networkID,\n\t\tPubKey: pubKey,\n\t\tSuspend: false,\n\t\tStatus: sdk.Unbonded,\n\t\tTokens: sdk.ZeroInt(),\n\t\tOwnerAddress: ownerAddr,\n\t\tDescription: description,\n\t\tNodeType: nodeType,\n\t\tCreationTime: creationTime,\n\t}\n}", "func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int, chassisId string) error {\n\tnodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node)\n\tif err != nil || len(nodeTransitSwitchPortIPs) == 0 {\n\t\treturn fmt.Errorf(\"failed to get the node transit switch port Ips : %w\", err)\n\t}\n\n\tnetworkId, err := util.ParseNetworkIDAnnotation(node, zic.GetNetworkName())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get the network id for the network %s on node %s: %v\", zic.GetNetworkName(), node.Name, err)\n\t}\n\n\ttransitSwitchTunnelKey := BaseTransitSwitchTunnelKey + networkId\n\tts := &nbdb.LogicalSwitch{\n\t\tName: zic.networkTransitSwitchName,\n\t\tOtherConfig: map[string]string{\n\t\t\t\"interconn-ts\": zic.networkTransitSwitchName,\n\t\t\t\"requested-tnl-key\": strconv.Itoa(transitSwitchTunnelKey),\n\t\t\t\"mcast_snoop\": \"true\",\n\t\t\t\"mcast_flood_unregistered\": \"true\",\n\t\t},\n\t}\n\n\t// Create transit switch if it doesn't exist\n\tif err := libovsdbops.CreateOrUpdateLogicalSwitch(zic.nbClient, ts); err != nil {\n\t\treturn fmt.Errorf(\"failed to create/update transit switch %s: %w\", zic.networkTransitSwitchName, err)\n\t}\n\n\ttransitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP)\n\tvar transitRouterPortNetworks []string\n\tfor _, ip := range nodeTransitSwitchPortIPs {\n\t\ttransitRouterPortNetworks = append(transitRouterPortNetworks, ip.String())\n\t}\n\n\tremotePortAddr := transitRouterPortMac.String()\n\tfor _, tsNetwork := range 
transitRouterPortNetworks {\n\t\tremotePortAddr = remotePortAddr + \" \" + tsNetwork\n\t}\n\n\tlspOptions := map[string]string{\n\t\t\"requested-tnl-key\": strconv.Itoa(nodeID),\n\t}\n\t// Store the node name in the external_ids column for book keeping\n\texternalIDs := map[string]string{\n\t\t\"node\": node.Name,\n\t}\n\n\tremotePortName := zic.GetNetworkScopedName(types.TransitSwitchToRouterPrefix + node.Name)\n\tif err := zic.addNodeLogicalSwitchPort(zic.networkTransitSwitchName, remotePortName, lportTypeRemote, []string{remotePortAddr}, lspOptions, externalIDs); err != nil {\n\t\treturn err\n\t}\n\t// Set the port binding chassis.\n\tif err := zic.setRemotePortBindingChassis(node.Name, remotePortName, chassisId); err != nil {\n\t\treturn err\n\t}\n\n\tif err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs); err != nil {\n\t\treturn err\n\t}\n\n\t// Cleanup the logical router port connecting to the transit switch for the remote node (if present)\n\t// Cleanup would be required when a local zone node moves to a remote zone.\n\treturn zic.cleanupNodeClusterRouterPort(node.Name)\n}", "func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int, chassisId string) error {\n\tnodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node)\n\tif err != nil || len(nodeTransitSwitchPortIPs) == 0 {\n\t\treturn fmt.Errorf(\"failed to get the node transit switch port Ips : %w\", err)\n\t}\n\n\ttransitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP)\n\tvar transitRouterPortNetworks []string\n\tfor _, ip := range nodeTransitSwitchPortIPs {\n\t\ttransitRouterPortNetworks = append(transitRouterPortNetworks, ip.String())\n\t}\n\n\tremotePortAddr := transitRouterPortMac.String()\n\tfor _, tsNetwork := range transitRouterPortNetworks {\n\t\tremotePortAddr = remotePortAddr + \" \" + tsNetwork\n\t}\n\n\tlspOptions := map[string]string{\n\t\t\"requested-tnl-key\": strconv.Itoa(nodeID),\n\t}\n\t// 
Store the node name in the external_ids column for book keeping\n\texternalIDs := map[string]string{\n\t\t\"node\": node.Name,\n\t}\n\n\tremotePortName := zic.GetNetworkScopedName(types.TransitSwitchToRouterPrefix + node.Name)\n\tif err := zic.addNodeLogicalSwitchPort(zic.networkTransitSwitchName, remotePortName, lportTypeRemote, []string{remotePortAddr}, lspOptions, externalIDs); err != nil {\n\t\treturn err\n\t}\n\t// Set the port binding chassis.\n\tif err := zic.setRemotePortBindingChassis(node.Name, remotePortName, chassisId); err != nil {\n\t\treturn err\n\t}\n\n\tif err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs); err != nil {\n\t\treturn err\n\t}\n\n\t// Cleanup the logical router port connecting to the transit switch for the remote node (if present)\n\t// Cleanup would be required when a local zone node moves to a remote zone.\n\treturn zic.cleanupNodeClusterRouterPort(node.Name)\n}", "func (c *Calcium) NodeResource(ctx context.Context, nodename string, fix bool) (*types.NodeResourceInfo, error) {\n\tlogger := log.WithFunc(\"calcium.NodeResource\").WithField(\"node\", nodename).WithField(\"fix\", fix)\n\tnr, err := c.doGetNodeResource(ctx, nodename, true, fix)\n\tlogger.Error(ctx, err)\n\treturn nr, err\n}", "func (r *Reconciler) createResource(ctx context.Context, resourceName string, serverClient k8sclient.Client) (runtime.Object, error) {\n\tif r.extraParams == nil {\n\t\tr.extraParams = map[string]string{}\n\t}\n\tr.extraParams[\"MonitoringKey\"] = r.Config.GetLabelSelector()\n\tr.extraParams[\"Namespace\"] = r.Config.GetOperatorNamespace()\n\n\ttemplateHelper := NewTemplateHelper(r.extraParams)\n\tresource, err := templateHelper.CreateResource(resourceName)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"createResource failed: %w\", err)\n\t}\n\n\tmetaObj, err := meta.Accessor(resource)\n\tif err == nil {\n\t\towner.AddIntegreatlyOwnerAnnotations(metaObj, r.installation)\n\t}\n\n\terr = serverClient.Create(ctx, resource)\n\tif 
err != nil {\n\t\tif !k8serr.IsAlreadyExists(err) {\n\t\t\treturn nil, fmt.Errorf(\"error creating resource: %w\", err)\n\t\t}\n\t}\n\n\treturn resource, nil\n}", "func (router *Router) createResource(w http.ResponseWriter, r *http.Request) {\n\tclusterName := r.URL.Query().Get(\"cluster\")\n\tnamespace := r.URL.Query().Get(\"namespace\")\n\tname := r.URL.Query().Get(\"name\")\n\tresource := r.URL.Query().Get(\"resource\")\n\tsubResource := r.URL.Query().Get(\"subResource\")\n\tpath := r.URL.Query().Get(\"path\")\n\n\tlog.WithFields(logrus.Fields{\"cluster\": clusterName, \"namespace\": namespace, \"name\": name, \"path\": path, \"resource\": resource, \"subResource\": subResource}).Tracef(\"createResource\")\n\n\tcluster := router.clusters.GetCluster(clusterName)\n\tif cluster == nil {\n\t\terrresponse.Render(w, r, nil, http.StatusBadRequest, \"Invalid cluster name\")\n\t\treturn\n\t}\n\n\tif router.isForbidden(resource) {\n\t\terrresponse.Render(w, r, nil, http.StatusForbidden, fmt.Sprintf(\"Access for resource %s is forbidding\", resource))\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terrresponse.Render(w, r, err, http.StatusBadRequest, \"Could not decode request body\")\n\t\treturn\n\t}\n\n\terr = cluster.CreateResource(r.Context(), namespace, name, path, resource, subResource, body)\n\tif err != nil {\n\t\terrresponse.Render(w, r, err, http.StatusBadRequest, \"Could not create resource\")\n\t\treturn\n\t}\n\n\trender.JSON(w, r, nil)\n}", "func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.Node, nodeID int) error {\n\tnodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node)\n\tif err != nil || len(nodeTransitSwitchPortIPs) == 0 {\n\t\treturn fmt.Errorf(\"failed to get the node transit switch port ips for node %s: %w\", node.Name, err)\n\t}\n\n\ttransitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP)\n\tvar transitRouterPortNetworks []string\n\tfor _, ip := 
range nodeTransitSwitchPortIPs {\n\t\ttransitRouterPortNetworks = append(transitRouterPortNetworks, ip.String())\n\t}\n\n\t// Connect transit switch to the cluster router by creating a pair of logical switch port - logical router port\n\tlogicalRouterPortName := zic.GetNetworkScopedName(types.RouterToTransitSwitchPrefix + node.Name)\n\tlogicalRouterPort := nbdb.LogicalRouterPort{\n\t\tName: logicalRouterPortName,\n\t\tMAC: transitRouterPortMac.String(),\n\t\tNetworks: transitRouterPortNetworks,\n\t\tOptions: map[string]string{\n\t\t\t\"mcast_flood\": \"true\",\n\t\t},\n\t}\n\tlogicalRouter := nbdb.LogicalRouter{\n\t\tName: zic.networkClusterRouterName,\n\t}\n\n\tif err := libovsdbops.CreateOrUpdateLogicalRouterPort(zic.nbClient, &logicalRouter, &logicalRouterPort, nil); err != nil {\n\t\treturn fmt.Errorf(\"failed to create/update cluster router %s to add transit switch port %s for the node %s: %w\", zic.networkClusterRouterName, logicalRouterPortName, node.Name, err)\n\t}\n\n\tlspOptions := map[string]string{\n\t\t\"router-port\": logicalRouterPortName,\n\t\t\"requested-tnl-key\": strconv.Itoa(nodeID),\n\t}\n\n\t// Store the node name in the external_ids column for book keeping\n\texternalIDs := map[string]string{\n\t\t\"node\": node.Name,\n\t}\n\terr = zic.addNodeLogicalSwitchPort(zic.networkTransitSwitchName, zic.GetNetworkScopedName(types.TransitSwitchToRouterPrefix+node.Name),\n\t\tlportTypeRouter, []string{lportTypeRouterAddr}, lspOptions, externalIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Its possible that node is moved from a remote zone to the local zone. 
Check and delete the remote zone routes\n\t// for this node as it's no longer needed.\n\treturn zic.deleteLocalNodeStaticRoutes(node, nodeID, nodeTransitSwitchPortIPs)\n}", "func NewTerraNodeResource(node db.Node) TerraNodeResource {\n\treturn TerraNodeResource{\n\t\tJAID: NewJAIDInt32(node.ID),\n\t\tName: node.Name,\n\t\tTerraChainID: node.TerraChainID,\n\t\tTendermintURL: node.TendermintURL,\n\t\tCreatedAt: node.CreatedAt,\n\t\tUpdatedAt: node.UpdatedAt,\n\t}\n}", "func (cc *ContrailCommand) CreateNode(host vcenter.ESXIHost) error {\n\tlog.Debug(\"Create Node:\", cc.AuthToken)\n\tnodeResource := contrailCommandNodeSync{\n\t\tResources: []*nodeResources{\n\t\t\t{\n\t\t\t\tKind: \"node\",\n\t\t\t\tData: &nodeData{\n\t\t\t\t\tNodeType: \"esxi\",\n\t\t\t\t\tUUID: host.UUID,\n\t\t\t\t\tHostname: host.Hostname,\n\t\t\t\t\tFqName: []string{\"default-global-system-config\", host.Hostname},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tjsonData, err := json.Marshal(nodeResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Sending Request\")\n\tresp, _, err := cc.sendRequest(\"/sync\", string(jsonData), \"POST\") //nolint: bodyclose\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Got status : \", resp.StatusCode)\n\tswitch resp.StatusCode {\n\tdefault:\n\t\treturn fmt.Errorf(\"resource creation failed, %d\", resp.StatusCode)\n\tcase 200, 201:\n\t}\n\treturn nil\n}", "func CreateCRD(resource apiextensions.CustomResource) error {\n\tscope := apiextensionsv1.NamespaceScoped\n\tif string(resource.Scope) == string(apiextensionsv1.ClusterScoped) {\n\t\tscope = apiextensionsv1.ClusterScoped\n\t}\n\tignoreSchemaValidation := true\n\tcrdName := fmt.Sprintf(\"%s.%s\", resource.Plural, resource.Group)\n\tcrd := &apiextensionsv1.CustomResourceDefinition{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: crdName,\n\t\t},\n\t\tSpec: apiextensionsv1.CustomResourceDefinitionSpec{\n\t\t\tGroup: resource.Group,\n\t\t\tVersions: 
[]apiextensionsv1.CustomResourceDefinitionVersion{\n\t\t\t\t{Name: resource.Version,\n\t\t\t\t\tServed: true,\n\t\t\t\t\tStorage: true,\n\t\t\t\t\tSchema: &apiextensionsv1.CustomResourceValidation{\n\t\t\t\t\t\tOpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{\n\t\t\t\t\t\t\tXPreserveUnknownFields: &ignoreSchemaValidation,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tScope: scope,\n\t\t\tNames: apiextensionsv1.CustomResourceDefinitionNames{\n\t\t\t\tSingular: resource.Name,\n\t\t\t\tPlural: resource.Plural,\n\t\t\t\tKind: resource.Kind,\n\t\t\t\tShortNames: resource.ShortNames,\n\t\t\t},\n\t\t},\n\t}\n\terr := apiextensions.Instance().RegisterCRD(crd)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NetworkResourceCreate(w http.ResponseWriter, r *http.Request) {\n\tvar req openapi.NetworkResourceCreateRequest\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\thandleError(w, http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tnetwork, err := ctr.CreateNetworkResource(&req)\n\tif err != nil {\n\t\thandleError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\n\thandleResult(w, http.StatusCreated, network)\n\treturn\n}", "func CreateResource(kubedata *krd.GenericKubeResourceData, kubeclient *kubernetes.Clientset) (string, error) {\n\tif kubedata.Namespace == \"\" {\n\t\tkubedata.Namespace = \"default\"\n\t}\n\n\tif _, err := os.Stat(kubedata.YamlFilePath); err != nil {\n\t\treturn \"\", pkgerrors.New(\"File \" + kubedata.YamlFilePath + \" not found\")\n\t}\n\n\tlog.Println(\"Reading deployment YAML\")\n\trawBytes, err := ioutil.ReadFile(kubedata.YamlFilePath)\n\tif err != nil {\n\t\treturn \"\", pkgerrors.Wrap(err, \"Deployment YAML file read error\")\n\t}\n\n\tlog.Println(\"Decoding deployment YAML\")\n\tdecode := scheme.Codecs.UniversalDeserializer().Decode\n\tobj, _, err := decode(rawBytes, nil, nil)\n\tif err != nil {\n\t\treturn \"\", pkgerrors.Wrap(err, \"Deserialize deployment 
error\")\n\t}\n\n\tswitch o := obj.(type) {\n\tcase *appsV1.Deployment:\n\t\tkubedata.DeploymentData = o\n\tdefault:\n\t\treturn \"\", pkgerrors.New(kubedata.YamlFilePath + \" contains another resource different than Deployment\")\n\t}\n\n\tkubedata.DeploymentData.Namespace = kubedata.Namespace\n\tkubedata.DeploymentData.Name = kubedata.InternalVNFID + \"-\" + kubedata.DeploymentData.Name\n\n\tresult, err := kubeclient.AppsV1().Deployments(kubedata.Namespace).Create(kubedata.DeploymentData)\n\tif err != nil {\n\t\treturn \"\", pkgerrors.Wrap(err, \"Create Deployment error\")\n\t}\n\n\treturn result.GetObjectMeta().GetName(), nil\n}", "func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {\n\tnpm := NodePodsMap{}\n\tfor _, node := range nodes {\n\t\tpods, err := podutil.ListPodsOnANode(ctx, client, node, nil)\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"node %s will not be processed, error in accessing its pods (%#v)\", node.Name, err)\n\t\t} else {\n\t\t\tnpm[node] = pods\n\t\t}\n\t}\n\treturn npm\n}", "func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.Node, nodeID int) error {\n\tnodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node)\n\tif err != nil || len(nodeTransitSwitchPortIPs) == 0 {\n\t\treturn fmt.Errorf(\"failed to get the node transit switch port ips for node %s: %w\", node.Name, err)\n\t}\n\n\tnetworkId, err := util.ParseNetworkIDAnnotation(node, zic.GetNetworkName())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get the network id for the network %s on node %s: %v\", zic.GetNetworkName(), node.Name, err)\n\t}\n\n\ttransitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP)\n\tvar transitRouterPortNetworks []string\n\tfor _, ip := range nodeTransitSwitchPortIPs {\n\t\ttransitRouterPortNetworks = append(transitRouterPortNetworks, ip.String())\n\t}\n\n\ttransitSwitchTunnelKey := BaseTransitSwitchTunnelKey + networkId\n\tts := 
&nbdb.LogicalSwitch{\n\t\tName: zic.networkTransitSwitchName,\n\t\tOtherConfig: map[string]string{\n\t\t\t\"interconn-ts\": zic.networkTransitSwitchName,\n\t\t\t\"requested-tnl-key\": strconv.Itoa(transitSwitchTunnelKey),\n\t\t\t\"mcast_snoop\": \"true\",\n\t\t\t\"mcast_flood_unregistered\": \"true\",\n\t\t},\n\t}\n\n\t// Create transit switch if it doesn't exist\n\tif err := libovsdbops.CreateOrUpdateLogicalSwitch(zic.nbClient, ts); err != nil {\n\t\treturn fmt.Errorf(\"failed to create/update transit switch %s: %w\", zic.networkTransitSwitchName, err)\n\t}\n\n\t// Connect transit switch to the cluster router by creating a pair of logical switch port - logical router port\n\tlogicalRouterPortName := zic.GetNetworkScopedName(types.RouterToTransitSwitchPrefix + node.Name)\n\tlogicalRouterPort := nbdb.LogicalRouterPort{\n\t\tName: logicalRouterPortName,\n\t\tMAC: transitRouterPortMac.String(),\n\t\tNetworks: transitRouterPortNetworks,\n\t\tOptions: map[string]string{\n\t\t\t\"mcast_flood\": \"true\",\n\t\t},\n\t}\n\tlogicalRouter := nbdb.LogicalRouter{\n\t\tName: zic.networkClusterRouterName,\n\t}\n\n\tif err := libovsdbops.CreateOrUpdateLogicalRouterPort(zic.nbClient, &logicalRouter, &logicalRouterPort, nil); err != nil {\n\t\treturn fmt.Errorf(\"failed to create/update cluster router %s to add transit switch port %s for the node %s: %w\", zic.networkClusterRouterName, logicalRouterPortName, node.Name, err)\n\t}\n\n\tlspOptions := map[string]string{\n\t\t\"router-port\": logicalRouterPortName,\n\t\t\"requested-tnl-key\": strconv.Itoa(nodeID),\n\t}\n\n\t// Store the node name in the external_ids column for book keeping\n\texternalIDs := map[string]string{\n\t\t\"node\": node.Name,\n\t}\n\terr = zic.addNodeLogicalSwitchPort(zic.networkTransitSwitchName, zic.GetNetworkScopedName(types.TransitSwitchToRouterPrefix+node.Name),\n\t\tlportTypeRouter, []string{lportTypeRouterAddr}, lspOptions, externalIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Its possible that 
node is moved from a remote zone to the local zone. Check and delete the remote zone routes\n\t// for this node as it's no longer needed.\n\treturn zic.deleteLocalNodeStaticRoutes(node, nodeID, nodeTransitSwitchPortIPs)\n}", "func getNode(ctx context.Context, client client.Interface, nodeName string) *libapi.Node {\n\tnode, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})\n\tif err != nil {\n\t\tif _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {\n\t\t\tlog.WithError(err).WithField(\"Name\", nodeName).Info(\"Unable to query node configuration\")\n\t\t\tlog.Warn(\"Unable to access datastore to query node configuration\")\n\t\t\tutils.Terminate()\n\t\t}\n\n\t\tlog.WithField(\"Name\", nodeName).Info(\"Building new node resource\")\n\t\tnode = libapi.NewNode()\n\t\tnode.Name = nodeName\n\t}\n\n\treturn node\n}", "func CrdCreate(crd *CrdType) {\n\tutil.KubeCreateSkipExisting(crd)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetNodeTopology returns the NodeResourceTopology data for the node identified by `nodeName`.
func GetNodeTopology(ctx context.Context, topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology { var nodeTopology *v1alpha2.NodeResourceTopology var err error gomega.EventuallyWithOffset(1, func() bool { nodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { framework.Logf("failed to get the node topology resource: %v", err) return false } return true }, time.Minute, 5*time.Second).Should(gomega.BeTrue()) return nodeTopology }
[ "func GetTopology(ctx *sgo.Context) error {\n\ttimeRange, now := range2stamp(ctx.Param(\"range\"))\n\tnodeList, err := modelGetTopology(ctx.Param(\"gateway\"), timeRange, now)\n\tif err != nil {\n\t\treturn ctx.JSON(500, 0, err.Error(), nil)\n\t}\n\tif len(nodeList) == 0 {\n\t\treturn ctx.JSON(200, 0, \"no result found\", nil)\n\t}\n\treturn ctx.JSON(200, 1, \"success\", nodeList)\n}", "func IsValidNodeTopology(nodeTopology *v1alpha2.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool {\n\tif nodeTopology == nil || len(nodeTopology.TopologyPolicies) == 0 {\n\t\tframework.Logf(\"failed to get topology policy from the node topology resource\")\n\t\treturn false\n\t}\n\n\ttmPolicy := string(topologypolicy.DetectTopologyPolicy(kubeletConfig.TopologyManagerPolicy, kubeletConfig.TopologyManagerScope))\n\tif nodeTopology.TopologyPolicies[0] != tmPolicy {\n\t\tframework.Logf(\"topology policy mismatch got %q expected %q\", nodeTopology.TopologyPolicies[0], tmPolicy)\n\t\treturn false\n\t}\n\n\texpectedPolicyAttribute := v1alpha2.AttributeInfo{\n\t\tName: nfdtopologyupdater.TopologyManagerPolicyAttributeName,\n\t\tValue: kubeletConfig.TopologyManagerPolicy,\n\t}\n\tif !containsAttribute(nodeTopology.Attributes, expectedPolicyAttribute) {\n\t\tframework.Logf(\"topology policy attributes don't have correct topologyManagerPolicy attribute expected %v attributeList %v\", expectedPolicyAttribute, nodeTopology.Attributes)\n\t\treturn false\n\t}\n\n\texpectedScopeAttribute := v1alpha2.AttributeInfo{\n\t\tName: nfdtopologyupdater.TopologyManagerScopeAttributeName,\n\t\tValue: kubeletConfig.TopologyManagerScope,\n\t}\n\tif !containsAttribute(nodeTopology.Attributes, expectedScopeAttribute) {\n\t\tframework.Logf(\"topology policy attributes don't have correct topologyManagerScope attribute expected %v attributeList %v\", expectedScopeAttribute, nodeTopology.Attributes)\n\t\treturn false\n\t}\n\n\tif nodeTopology.Zones == nil || len(nodeTopology.Zones) == 0 
{\n\t\tframework.Logf(\"failed to get topology zones from the node topology resource\")\n\t\treturn false\n\t}\n\n\tfoundNodes := 0\n\tfor _, zone := range nodeTopology.Zones {\n\t\t// TODO constant not in the APIs\n\t\tif !strings.HasPrefix(strings.ToUpper(zone.Type), \"NODE\") {\n\t\t\tcontinue\n\t\t}\n\t\tfoundNodes++\n\n\t\tif !isValidCostList(zone.Name, zone.Costs) {\n\t\t\tframework.Logf(\"invalid cost list for zone %q\", zone.Name)\n\t\t\treturn false\n\t\t}\n\n\t\tif !isValidResourceList(zone.Name, zone.Resources) {\n\t\t\tframework.Logf(\"invalid resource list for zone %q\", zone.Name)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn foundNodes > 0\n}", "func (p *Provider) GetTopology(ctx context.Context) (*hardware.Topology, error) {\n\tch := make(chan topologyResult)\n\tgo p.getTopologyAsync(ctx, ch)\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase result := <-ch:\n\t\treturn result.topology, result.err\n\t}\n}", "func getTopologyFromPod(pod *v1.Pod, nodeList *v1.NodeList) (string, string, error) {\n\tfor _, node := range nodeList.Items {\n\t\tif pod.Spec.NodeName == node.Name {\n\t\t\tpodRegion := node.Labels[regionKey]\n\t\t\tpodZone := node.Labels[zoneKey]\n\t\t\treturn podRegion, podZone, nil\n\t\t}\n\t}\n\terr := errors.New(\"Could not find the topology from pod\")\n\treturn \"\", \"\", err\n}", "func (ctl *Ctl) GetTopology() *CtlTopology {\n\tctl.m.Lock()\n\trv := ctl.getTopologyLOCKED()\n\tctl.m.Unlock()\n\n\treturn rv\n}", "func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {\n\t// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.\n\tif az.nodeInformerSynced == nil {\n\t\treturn az.ResourceGroup, nil\n\t}\n\n\taz.nodeCachesLock.RLock()\n\tdefer az.nodeCachesLock.RUnlock()\n\tif !az.nodeInformerSynced() {\n\t\treturn \"\", fmt.Errorf(\"node informer is not synced when trying to GetNodeResourceGroup\")\n\t}\n\n\t// Return external resource group if it has been cached.\n\tif cachedRG, 
ok := az.nodeResourceGroups[nodeName]; ok {\n\t\treturn cachedRG, nil\n\t}\n\n\t// Return resource group from cloud provider options.\n\treturn az.ResourceGroup, nil\n}", "func (cm *ClusterManager) GetTopologyNodes(ctx context.Context,\n\treq *cmproto.GetTopologyNodesRequest, resp *cmproto.GetTopologyNodesResponse) error {\n\treqID, err := requestIDFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstart := time.Now()\n\tla := thirdparty.NewGetTopoNodesAction()\n\tla.Handle(ctx, req, resp)\n\tmetrics.ReportAPIRequestMetric(\"GetTopologyNodes\", \"grpc\", strconv.Itoa(int(resp.Code)), start)\n\tblog.V(3).Infof(\"reqID: %s, action: GetTopologyNodes, req %v\", reqID, req)\n\treturn nil\n}", "func (re *RequestExecutor) GetTopologyNodes() []*ServerNode {\n\tt := re.GetTopology()\n\tif t == nil || len(t.Nodes) == 0 {\n\t\treturn nil\n\t}\n\treturn append([]*ServerNode{}, t.Nodes...)\n}", "func (r *QueryRequest) getTopologyInfo() *topologyInfo {\n\tif r.PreparedStatement == nil {\n\t\treturn nil\n\t}\n\n\treturn r.PreparedStatement.topologyInfo\n}", "func GetNodeName(ctx context.Context) (string, bool) {\n\tif val := ctx.Value(nodeNameKey); val != nil {\n\t\tresult, ok := val.(string)\n\t\treturn result, ok\n\t}\n\treturn \"\", false\n}", "func (info *BaseEndpointInfo) GetNodeName() string {\n\treturn info.NodeName\n}", "func (c *Client) Topology(ctx context.Context) (topology Topology, err error) {\n\tvar t debugapi.Topology\n\tfor r := 0; r < c.retry; r++ {\n\t\ttime.Sleep(2 * time.Duration(r) * time.Second)\n\n\t\tt, err = c.debug.Node.Topology(ctx)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif err != nil {\n\t\treturn Topology{}, fmt.Errorf(\"get topology: %w\", err)\n\t}\n\n\ttopology = Topology{\n\t\tOverlay: t.BaseAddr,\n\t\tConnected: t.Connected,\n\t\tPopulation: t.Population,\n\t\tNnLowWatermark: t.NnLowWatermark,\n\t\tDepth: t.Depth,\n\t\tBins: make(map[string]Bin),\n\t}\n\n\tfor k, b := range t.Bins {\n\t\tif b.Population > 0 
{\n\t\t\ttopology.Bins[k] = Bin{\n\t\t\t\tConnected: b.Connected,\n\t\t\t\tConnectedPeers: b.ConnectedPeers,\n\t\t\t\tDisconnectedPeers: b.DisconnectedPeers,\n\t\t\t\tPopulation: b.Population,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}", "func (info *endpointsInfo) GetNodeName() string {\n\treturn \"\"\n}", "func (m *ClusterService) getTopology(ctx context.Context, args struct{}) (*proto.GeneralResp, error) {\n\tif _, _, err := permissions(ctx, ADMIN); err != nil {\n\t\treturn nil, err\n\t}\n\ttv := &TopologyView{\n\t\tZones: make([]*ZoneView, 0),\n\t}\n\tzones := m.cluster.t.getAllZones()\n\tfor _, zone := range zones {\n\t\tcv := newZoneView(zone.name)\n\t\tcv.Status = zone.getStatusToString()\n\t\ttv.Zones = append(tv.Zones, cv)\n\t\tnsc := zone.getAllNodeSet()\n\t\tfor _, ns := range nsc {\n\t\t\tnsView := newNodeSetView(ns.dataNodeLen(), ns.metaNodeLen())\n\t\t\tcv.NodeSet[ns.ID] = nsView\n\t\t\tns.dataNodes.Range(func(key, value interface{}) bool {\n\t\t\t\tdataNode := value.(*DataNode)\n\t\t\t\tnsView.DataNodes = append(nsView.DataNodes, proto.NodeView{ID: dataNode.ID, Addr: dataNode.Addr, Status: dataNode.isActive, IsWritable: dataNode.isWriteAble()})\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tns.metaNodes.Range(func(key, value interface{}) bool {\n\t\t\t\tmetaNode := value.(*MetaNode)\n\t\t\t\tnsView.MetaNodes = append(nsView.MetaNodes, proto.NodeView{ID: metaNode.ID, Addr: metaNode.Addr, Status: metaNode.IsActive, IsWritable: metaNode.isWritable()})\n\t\t\t\treturn true\n\t\t\t})\n\t\t}\n\t}\n\n\tbs, e := json.Marshal(tv)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn proto.Success(string(bs)), e\n}", "func (m *defaultManager) GetNodeByName(ctx context.Context, nodeName string) (*vsphere.VirtualMachine, error) {\n\tlog := logger.GetLogger(ctx)\n\tnodeUUID, found := m.nodeNameToUUID.Load(nodeName)\n\tif found && nodeUUID != nil && nodeUUID.(string) != \"\" {\n\t\treturn m.GetNode(ctx, nodeUUID.(string), nil)\n\t}\n\tlog.Infof(\"Empty nodeUUID observed in cache for 
the node: %q\", nodeName)\n\tk8snodeUUID, err := k8s.GetNodeUUID(ctx, m.k8sClient, nodeName)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to get node UUID from node: %q. Err: %v\", nodeName, err)\n\t\treturn nil, err\n\t}\n\tm.nodeNameToUUID.Store(nodeName, k8snodeUUID)\n\treturn m.GetNode(ctx, k8snodeUUID, nil)\n\n}", "func GetNodeCapabilityType(kv *api.KV, deploymentID, nodeName, capabilityName string) (string, error) {\n\t// Now look at capability type for default\n\tnodeType, err := GetNodeType(kv, deploymentID, nodeName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn GetNodeTypeCapabilityType(kv, deploymentID, nodeType, capabilityName)\n}", "func (s NodeService) GetCapacityByNodeName(ctx context.Context, name, deviceGroup string) (int64, error) {\n\tnode := new(corev1.Node)\n\terr := s.Get(ctx, client.ObjectKey{Name: name}, node)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tfor key, v := range node.Status.Allocatable {\n\t\tif string(key) == deviceGroup || string(key) == utils.DeviceCapacityKeyPrefix+deviceGroup {\n\t\t\treturn v.Value(), nil\n\t\t}\n\t}\n\treturn 0, errors.New(\"device group not found\")\n}", "func (rc *ResourceCache) GetNodeResources(nodeName string) []*Resource {\n\trc.lock.Lock()\n\tdefer rc.lock.Unlock()\n\tnode, ok := rc.Nodes[nodeName]\n\tif !ok {\n\t\treturn nil\n\t}\n\tretRes := make([]*Resource, 0)\n\tfor _, r := range node.Resources {\n\t\tretRes = append(retRes, r.DeepCopy())\n\t}\n\tsort.Slice(retRes, func(i, j int) bool {\n\t\treturn retRes[i].Key() < retRes[j].Key()\n\t})\n\treturn retRes\n}", "func (c *Client) GetNodeOperator() scheduler.NodeOperator {\n\treturn c.Node\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AllocatableResourceListFromNodeResourceTopology extract the map zone:allocatableResources from the given NodeResourceTopology instance.
func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha2.NodeResourceTopology) map[string]corev1.ResourceList { allocRes := make(map[string]corev1.ResourceList) for _, zone := range nodeTopo.Zones { if zone.Type != "Node" { continue } resList := make(corev1.ResourceList) for _, res := range zone.Resources { resList[corev1.ResourceName(res.Name)] = res.Allocatable.DeepCopy() } if len(resList) == 0 { continue } allocRes[zone.Name] = resList } return allocRes }
[ "func CompareAllocatableResources(expected, got map[string]corev1.ResourceList) (string, string, int, bool) {\n\tif len(got) != len(expected) {\n\t\tframework.Logf(\"-> expected=%v (len=%d) got=%v (len=%d)\", expected, len(expected), got, len(got))\n\t\treturn \"\", \"\", 0, false\n\t}\n\tfor expZoneName, expResList := range expected {\n\t\tgotResList, ok := got[expZoneName]\n\t\tif !ok {\n\t\t\treturn expZoneName, \"\", 0, false\n\t\t}\n\t\tif resName, cmp, ok := CompareResourceList(expResList, gotResList); !ok || cmp != 0 {\n\t\t\treturn expZoneName, resName, cmp, ok\n\t\t}\n\t}\n\treturn \"\", \"\", 0, true\n}", "func (*Resources) AllocResources(rqst AllocRequest) (alloc *Allocated, err kv.Error) {\n\n\talloc = &Allocated{}\n\n\t// Allocate the GPU resources first, they are typically the least available\n\tif alloc.GPU, err = AllocGPU(rqst.MaxGPU, rqst.MaxGPUMem, rqst.GPUDivisibles); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// CPU resources next\n\tif alloc.CPU, err = AllocCPU(rqst.MaxCPU, rqst.MaxMem); err != nil {\n\t\talloc.Release()\n\t\treturn nil, err\n\t}\n\n\t// Lastly, disk storage\n\tif alloc.Disk, err = AllocDisk(rqst.MaxDisk); err != nil {\n\t\talloc.Release()\n\t\treturn nil, err\n\t}\n\n\treturn alloc, nil\n}", "func (config *AppConfig) GetNodeAllocations(nodeID int) (allocations NodeAllocations, err error) {\n\t// Get allocation info from the panel\n\tallocBytes, err := config.queryApplicationAPI(fmt.Sprintf(\"nodes/%d/allocations?page=%d\", nodeID), \"get\", nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Unmarshal the bytes to a usable struct.\n\terr = json.Unmarshal(allocBytes, &allocations)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 1; i >= allocations.Meta.Pagination.TotalPages; i++ {\n\t\tallocations, err := config.getNodeAllocationsByPage(nodeID, i)\n\t\tif err != nil {\n\t\t\treturn allocations, err\n\t\t}\n\t\tfor _, allocation := range allocations.Allocations {\n\t\t\tallocations.Allocations = append(allocations.Allocations, 
allocation)\n\t\t}\n\t}\n\n\treturn\n}", "func (r *LocalResource) Allocated() ([]ResourceAllocation, error) {\n\tallocations := []ResourceAllocation{}\n\tfor i := 0; i < r.count; i++ {\n\t\tallocation := ResourceAllocation{\n\t\t\tName: fmt.Sprintf(\"instance-%d\", i+1),\n\t\t}\n\t\tallocations = append(allocations, allocation)\n\t}\n\n\treturn allocations, nil\n}", "func dataSourceVolterraAddressAllocator() *schema.Resource {\n\treturn &schema.Resource{\n\t\tRead: dataSourceAddressAllocatorRead,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"namespace\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"mode\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"allocation_map\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}", "func GetNumPodsAllocatable(node *api.Node) float64 {\n\t// Compute both the available IP address range and the maxpods set on the node.\n\t// Pick the smaller limit.\n\tvar allocatableIPAddresses float64\n\tif node.Spec.PodCIDR != \"\" {\n\t\t// This also validates the CIDR string for malformed values, although unlikely.\n\t\t_, podNet, err := net.ParseCIDR(node.Spec.PodCIDR)\n\t\tif err != nil {\n\t\t\tglog.Warning(\"Error parsing node CIDR:\", err)\n\t\t} else {\n\t\t\tallocatableIPAddresses = getAllocatableIPAddresses(podNet)\n\t\t\tglog.V(4).Infof(\"Allocatable pod IP addresses: %f on node: %s\", allocatableIPAddresses, node.Name)\n\t\t}\n\t}\n\n\tallocatablePods := float64(node.Status.Allocatable.Pods().Value())\n\tglog.V(4).Infof(\"Allocatable maxPods: %f on node: %s\", allocatablePods, node.Name)\n\n\tif allocatableIPAddresses != 0 && allocatableIPAddresses < allocatablePods {\n\t\treturn allocatableIPAddresses\n\t}\n\treturn allocatablePods\n}", "func ResourceToResourceList(r *schedulerframework.Resource) apiv1.ResourceList {\n\tresult := 
apiv1.ResourceList{\n\t\tapiv1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),\n\t\tapiv1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),\n\t\tapiv1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),\n\t\tapiv1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),\n\t}\n\tfor rName, rQuant := range r.ScalarResources {\n\t\tif isHugePageResourceName(rName) {\n\t\t\tresult[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)\n\t\t} else {\n\t\t\tresult[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)\n\t\t}\n\t}\n\treturn result\n}", "func (rrm *ResourceReservationManager) GetReservedResources() resources.NodeGroupResources {\n\tresourceReservations := rrm.resourceReservations.List()\n\tusage := resources.UsageForNodes(resourceReservations)\n\tusage.Add(rrm.softReservationStore.UsedSoftReservationResources())\n\treturn usage\n}", "func (a *ResourcepoolApiService) GetResourcepoolLeaseResourceList(ctx context.Context) ApiGetResourcepoolLeaseResourceListRequest {\n\treturn ApiGetResourcepoolLeaseResourceListRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func GetLabeledReservedResourcesFromResources(\n\tresources []*mesos.Resource) map[string]*ReservedResources {\n\n\treservedResources := make(map[string]*ReservedResources)\n\tfor _, res := range resources {\n\t\tif res.GetRole() == \"\" ||\n\t\t\tres.GetRole() == unreservedRole ||\n\t\t\tres.GetReservation().GetLabels() == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: only extract uuid field as reservation key.\n\t\tresLabels := res.GetReservation().GetLabels().String()\n\t\tif _, ok := reservedResources[resLabels]; !ok {\n\t\t\treservedResources[resLabels] = &ReservedResources{\n\t\t\t\tResources: scalar.Resources{},\n\t\t\t}\n\t\t}\n\n\t\tif res.GetDisk() != nil {\n\t\t\tvolumeID := res.GetDisk().GetPersistence().GetId()\n\t\t\treservedResources[resLabels].Volumes = 
append(\n\t\t\t\treservedResources[resLabels].Volumes,\n\t\t\t\tvolumeID)\n\t\t\tcontinue\n\t\t}\n\n\t\tresResource := scalar.FromMesosResource(res)\n\t\treservedResources[resLabels].Resources = reservedResources[resLabels].Resources.Add(\n\t\t\tresResource)\n\t}\n\treturn reservedResources\n}", "func (rc *ResourceCache) GetNodeResources(nodeName string) []*Resource {\n\trc.lock.Lock()\n\tdefer rc.lock.Unlock()\n\tnode, ok := rc.Nodes[nodeName]\n\tif !ok {\n\t\treturn nil\n\t}\n\tretRes := make([]*Resource, 0)\n\tfor _, r := range node.Resources {\n\t\tretRes = append(retRes, r.DeepCopy())\n\t}\n\tsort.Slice(retRes, func(i, j int) bool {\n\t\treturn retRes[i].Key() < retRes[j].Key()\n\t})\n\treturn retRes\n}", "func (p *ResourcePool) Allocs(ctx context.Context) ([]Alloc, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tallocs := make([]Alloc, len(p.allocs))\n\ti := 0\n\tfor _, a := range p.allocs {\n\t\tallocs[i] = a\n\t\ti++\n\t}\n\treturn allocs, nil\n}", "func resourcesNode(node *yaml.Node) yit.Iterator {\n\treturn yit.FromNode(node).\n\t\t// Access nested 'resources' attribute\n\t\tValuesForMap(\n\t\t\tyit.WithStringValue(\"resources\"),\n\t\t\tyit.WithKind(yaml.SequenceNode),\n\t\t)\n}", "func buildNodeCapacityMap(acs []accrd.AvailableCapacity) NodeCapacityMap {\n\tcapMap := NodeCapacityMap{}\n\tfor _, ac := range acs {\n\t\tac := ac\n\t\tif _, ok := capMap[ac.Spec.NodeId]; !ok {\n\t\t\tcapMap[ac.Spec.NodeId] = ACMap{}\n\t\t}\n\t\tcapMap[ac.Spec.NodeId][ac.Name] = &ac\n\t}\n\treturn capMap\n}", "func (n *resPool) GetTotalAllocatedResources() *scalar.Resources {\n\tn.RLock()\n\tdefer n.RUnlock()\n\treturn n.allocation.GetByType(scalar.TotalAllocation)\n}", "func buildNormalizedNodeResourceMap(pods []*core_v1.Pod, nodes []*core_v1.Node) nodeResourceMap { // nolint: gocyclo\n\tnrm := nodeResourceMap{}\n\n\tfor _, n := range nodes {\n\t\tnrm[n.ObjectMeta.Name] = allocatedNodeResources{node: n}\n\t}\n\n\t// We sum the total allocated resources on every node from 
our list of pods.\n\t// Some strategies may wish to price pods based on their fraction of allocated\n\t// node resources, rather than the total resources available on a node. This\n\t// may punish lone pods that are initially scheduled onto large nodes, but this\n\t// may be desirable as it rightfully punishes applications that may cause\n\t// frequent node turnover.\n\tfor _, p := range pods {\n\t\tnr, ok := nrm[p.Spec.NodeName]\n\t\tif !ok {\n\t\t\tlog.Log.Warnw(\"unexpected missing node from NodeMap\", zap.String(\"nodeName\", p.Spec.NodeName))\n\t\t\tcontinue\n\t\t}\n\t\tnr.cpuUsed += sumPodResource(p, core_v1.ResourceCPU)\n\t\tnr.memoryUsed += sumPodResource(p, core_v1.ResourceMemory)\n\t\tnr.gpuUsed += sumPodResource(p, ResourceGPU)\n\t\tnrm[p.Spec.NodeName] = nr\n\t}\n\n\tfor k, v := range nrm {\n\t\tc := v.node.Status.Capacity.Cpu()\n\t\tif c != nil {\n\t\t\tv.cpuAvailable = c.MilliValue()\n\t\t}\n\n\t\tm := v.node.Status.Capacity.Memory()\n\t\tif m != nil {\n\t\t\tv.memoryAvailable = m.Value()\n\t\t}\n\n\t\tg := gpuCapacity(&v.node.Status.Capacity)\n\t\tif g != nil {\n\t\t\tv.gpuAvailable = g.Value()\n\t\t}\n\n\t\t// The ratio of cpuUsed / cpuAvailable is used for proportional scaling of\n\t\t// resources to \"normalize\" pod resource utilization to a full node. 
If\n\t\t// cpuUsed is 0 because the pods that are running have not made resource\n\t\t// requests, there's a possible divide by 0 in calling code so we default to\n\t\t// setting cpuUsed to cpuAvailable.\n\t\tif v.cpuUsed == 0 {\n\t\t\tv.cpuUsed = v.cpuAvailable\n\t\t}\n\n\t\tif v.memoryUsed == 0 {\n\t\t\tv.memoryUsed = v.memoryAvailable\n\t\t}\n\n\t\tif v.gpuUsed == 0 {\n\t\t\tv.gpuUsed = v.gpuAvailable\n\t\t}\n\n\t\tnrm[k] = v\n\t}\n\n\treturn nrm\n}", "func (p *MackerelProvider) GetResourceConnections() map[string]map[string][]string {\n\treturn map[string]map[string][]string{}\n}", "func computeClusterResources(nodes map[string]*repository.KubeNode) map[metrics.ResourceType]*repository.KubeDiscoveredResource {\n\t// sum the capacities of the node resources\n\tcomputeResources := make(map[metrics.ResourceType]float64)\n\tfor _, node := range nodes {\n\t\tnodeActive := util.NodeIsReady(node.Node) && util.NodeIsSchedulable(node.Node)\n\t\tif nodeActive {\n\t\t\t// Iterate over all ready and schedulable compute resource types\n\t\t\tfor _, rt := range metrics.KubeComputeResourceTypes {\n\t\t\t\t// get the compute resource if it exists\n\t\t\t\tnodeResource, exists := node.ComputeResources[rt]\n\t\t\t\tif !exists {\n\t\t\t\t\tglog.Errorf(\"Missing %s resource in node %s\", rt, node.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// add the capacity to the cluster compute resource map\n\t\t\t\tcomputeCap, exists := computeResources[rt]\n\t\t\t\tif !exists {\n\t\t\t\t\tcomputeCap = nodeResource.Capacity\n\t\t\t\t} else {\n\t\t\t\t\tcomputeCap = computeCap + nodeResource.Capacity\n\t\t\t\t}\n\t\t\t\tcomputeResources[rt] = computeCap\n\t\t\t}\n\t\t}\n\t}\n\n\t// create KubeDiscoveredResource object for each compute resource type\n\tclusterResources := make(map[metrics.ResourceType]*repository.KubeDiscoveredResource)\n\tfor _, rt := range metrics.KubeComputeResourceTypes {\n\t\tcapacity := computeResources[rt]\n\t\tr := &repository.KubeDiscoveredResource{\n\t\t\tType: 
rt,\n\t\t\tCapacity: capacity,\n\t\t}\n\t\tclusterResources[rt] = r\n\t}\n\treturn clusterResources\n}", "func ConvertToResmgrResource(resource *task.ResourceConfig) *Resources {\n\treturn &Resources{\n\t\tCPU: resource.GetCpuLimit(),\n\t\tDISK: resource.GetDiskLimitMb(),\n\t\tGPU: resource.GetGpuLimit(),\n\t\tMEMORY: resource.GetMemLimitMb(),\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CompareAllocatableResources compares `expected` and `got` map zone:allocatableResources respectively (see: AllocatableResourceListFromNodeResourceTopology), and informs the caller if the maps are equal. Here `equal` means the same zoneNames with the same resources, where the resources are equal if they have the same resources with the same quantities. Returns the name of the different zone, the name of the different resources within the zone, the comparison result (same semantic as strings.Compare) and a boolean that reports if the resourceLists are consistent. See `CompareResourceList`.
func CompareAllocatableResources(expected, got map[string]corev1.ResourceList) (string, string, int, bool) { if len(got) != len(expected) { framework.Logf("-> expected=%v (len=%d) got=%v (len=%d)", expected, len(expected), got, len(got)) return "", "", 0, false } for expZoneName, expResList := range expected { gotResList, ok := got[expZoneName] if !ok { return expZoneName, "", 0, false } if resName, cmp, ok := CompareResourceList(expResList, gotResList); !ok || cmp != 0 { return expZoneName, resName, cmp, ok } } return "", "", 0, true }
[ "func CompareResourceList(expected, got corev1.ResourceList) (string, int, bool) {\n\tif len(got) != len(expected) {\n\t\tframework.Logf(\"-> expected=%v (len=%d) got=%v (len=%d)\", expected, len(expected), got, len(got))\n\t\treturn \"\", 0, false\n\t}\n\tfor expResName, expResQty := range expected {\n\t\tgotResQty, ok := got[expResName]\n\t\tif !ok {\n\t\t\treturn string(expResName), 0, false\n\t\t}\n\t\tif cmp := gotResQty.Cmp(expResQty); cmp != 0 {\n\t\t\tframework.Logf(\"-> resource=%q cmp=%d expected=%v got=%v\", expResName, cmp, expResQty, gotResQty)\n\t\t\treturn string(expResName), cmp, true\n\t\t}\n\t}\n\treturn \"\", 0, true\n}", "func CompareResources(resA, resB types.Resource) int {\n\tequal := cmp.Equal(resA, resB,\n\t\tignoreProtoXXXFields(),\n\t\tcmpopts.IgnoreFields(types.Metadata{}, \"ID\"),\n\t\tcmpopts.IgnoreFields(types.DatabaseV3{}, \"Status\"),\n\t\tcmpopts.EquateEmpty(),\n\t)\n\tif equal {\n\t\treturn Equal\n\t}\n\treturn Different\n}", "func ResourcesEqual(a, b map[string]envoy.Resource) bool {\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor name, resource := range a {\n\t\tif !proto.Equal(resource, b[name]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (r *Compare) Compare() (map[string][]schema.GroupVersionResource, error) {\n\tpreferredSrcResourceList, err := collectPreferredResources(r.SrcDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrcCRDResource, err := collectPreferredCRDResource(r.SrcDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdstResourceList, err := collectNamespacedResources(r.DstDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpreferredSrcResourceList, err = r.excludeCRDs(preferredSrcResourceList, srcCRDResource, r.SrcClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresourcesDiff := r.compareResources(preferredSrcResourceList, dstResourceList)\n\tincompatibleGVKs, err := convertToGVRList(resourcesDiff)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\t// Don't report an incompatibleGVK if user settings will skip resource anyways\n\texcludedResources := toStringSlice(settings.ExcludedInitialResources.Union(toSet(r.Plan.Status.ExcludedResources)))\n\tfilteredGVKs := []schema.GroupVersionResource{}\n\tfor _, gvr := range incompatibleGVKs {\n\t\tskip := false\n\t\tfor _, resource := range excludedResources {\n\t\t\tif strings.EqualFold(gvr.Resource, resource) {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\t\tif !skip {\n\t\t\tfilteredGVKs = append(filteredGVKs, gvr)\n\t\t}\n\t}\n\n\treturn r.collectIncompatibleMapping(filteredGVKs)\n}", "func (r *Compare) CompareCRDs() (map[string][]schema.GroupVersionResource, error) {\n\tsrcCRDResource, err := collectPreferredCRDResource(r.SrcDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdstCRDResourceList, err := collectCRDResources(r.DstDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcrdGVDiff := r.compareResources(srcCRDResource, dstCRDResourceList)\n\t// if len(crdGVDiff)>0, then CRD APIVersion is incompatible between src and dest\n\tif len(crdGVDiff) > 0 {\n\t\tsrcCRDs, err := collectPreferredResources(r.SrcDiscovery)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsrcCRDs, err = r.includeCRDsOnly(srcCRDs, srcCRDResource, r.SrcClient)\n\n\t\tdstCRDs, err := collectNamespacedResources(r.DstDiscovery)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdstCRDs, err = r.includeCRDsOnly(dstCRDs, dstCRDResourceList, r.DstClient)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcrdsDiff := r.compareResources(srcCRDs, dstCRDs)\n\t\tincompatibleGVKs, err := convertToGVRList(crdsDiff)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Don't report an incompatibleGVK if user settings will skip resource anyways\n\t\texcludedResources := toStringSlice(settings.ExcludedInitialResources.Union(toSet(r.Plan.Status.ExcludedResources)))\n\t\tfilteredGVKs := []schema.GroupVersionResource{}\n\t\tfor _, gvr := range incompatibleGVKs 
{\n\t\t\tskip := false\n\t\t\tfor _, resource := range excludedResources {\n\t\t\t\tif strings.EqualFold(gvr.Resource, resource) {\n\t\t\t\t\tskip = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !skip {\n\t\t\t\tfilteredGVKs = append(filteredGVKs, gvr)\n\t\t\t}\n\t\t}\n\n\t\treturn r.collectIncompatibleMapping(filteredGVKs)\n\t}\n\treturn nil, nil\n}", "func (s *AirflowUISpec) ExpectedResources(rsrc interface{}) []ResourceInfo {\n\tr := rsrc.(*AirflowCluster)\n\treturn []ResourceInfo{\n\t\tResourceInfo{LifecycleManaged, s.sts(r), \"\"},\n\t\tResourceInfo{LifecycleManaged, s.secret(r), \"\"},\n\t}\n}", "func (s *NFSStoreSpec) ExpectedResources(rsrc interface{}) []ResourceInfo {\n\tr := rsrc.(*AirflowBase)\n\treturn []ResourceInfo{\n\t\tResourceInfo{LifecycleManaged, s.sts(r), \"\"},\n\t\tResourceInfo{LifecycleManaged, s.service(r), \"\"},\n\t\tResourceInfo{LifecycleManaged, s.podDisruption(r), \"\"},\n\t}\n}", "func (s *FlowerSpec) ExpectedResources(rsrc interface{}) []ResourceInfo {\n\tr := rsrc.(*AirflowCluster)\n\treturn []ResourceInfo{\n\t\tResourceInfo{LifecycleManaged, s.sts(r), \"\"},\n\t}\n}", "func (p *Plan) CompareResultSets(regressDir string, expectedDir string, t *tap.T) {\n\tfor i, rs := range p.ResultSets {\n\t\ttestName := strings.TrimPrefix(rs.Filename, regressDir+\"/out/\")\n\t\texpectedFilename := filepath.Join(expectedDir,\n\t\t\tfilepath.Base(rs.Filename))\n\t\tdiff, err := DiffFiles(expectedFilename, rs.Filename, 3)\n\n\t\tif err != nil {\n\t\t\tt.Diagnostic(\n\t\t\t\tfmt.Sprintf(`Query File: '%s'\nBindings File: '%s'\nBindings Name: '%s'\nQuery Parameters: '%v'\nExpected Result File: '%s'\nActual Result File: '%s'\n\nFailed to compare results: %s`,\n\t\t\t\t\tp.Query.Path,\n\t\t\t\t\tp.Path,\n\t\t\t\t\tp.Names[i],\n\t\t\t\t\tp.Bindings[i],\n\t\t\t\t\texpectedFilename,\n\t\t\t\t\trs.Filename,\n\t\t\t\t\terr.Error()))\n\t\t}\n\n\t\tif diff != \"\" {\n\t\t\tt.Diagnostic(\n\t\t\t\tfmt.Sprintf(`Query File: '%s'\nBindings File: '%s'\nBindings Name: '%s'\nQuery 
Parameters: '%v'\nExpected Result File: '%s'\nActual Result File: '%s'\n\n%s`,\n\t\t\t\t\tp.Query.Path,\n\t\t\t\t\tp.Path,\n\t\t\t\t\tp.Names[i],\n\t\t\t\t\tp.Bindings[i],\n\t\t\t\t\texpectedFilename,\n\t\t\t\t\trs.Filename,\n\t\t\t\t\tdiff))\n\t\t}\n\t\tt.Ok(diff == \"\", testName)\n\t}\n}", "func (rm *RsrcManager) SpecDiffers(expected, observed *reconciler.Object) bool {\n\te := expected.Obj.(*Object).Bucket\n\to := observed.Obj.(*Object).Bucket\n\treturn !reflect.DeepEqual(e.Acl, o.Acl) ||\n\t\t!reflect.DeepEqual(e.Billing, o.Billing) ||\n\t\t!reflect.DeepEqual(e.Cors, o.Cors) ||\n\t\t!reflect.DeepEqual(e.DefaultEventBasedHold, o.DefaultEventBasedHold) ||\n\t\t!reflect.DeepEqual(e.Encryption, o.Encryption) ||\n\t\t!reflect.DeepEqual(e.Labels, o.Labels) ||\n\t\t!reflect.DeepEqual(e.Lifecycle, o.Lifecycle) ||\n\t\t!strings.EqualFold(e.Location, o.Location) ||\n\t\t!reflect.DeepEqual(e.Logging, o.Logging) ||\n\t\t!reflect.DeepEqual(e.Name, o.Name) ||\n\t\t!reflect.DeepEqual(e.Owner, o.Owner) ||\n\t\t!reflect.DeepEqual(e.StorageClass, o.StorageClass) ||\n\t\t!reflect.DeepEqual(e.Versioning, o.Versioning) ||\n\t\t!reflect.DeepEqual(e.Website, o.Website)\n}", "func assertZonesMatch(t *testing.T, expected, actual time.Time) {\n\tt.Helper()\n\texpectedName, expectedOffset := expected.Zone()\n\tactualName, actualOffset := actual.Zone()\n\tif expectedOffset != actualOffset {\n\t\tt.Errorf(\"Expected Zone '%s' with offset %d. 
Got Zone '%s' with offset %d\", expectedName, expectedOffset, actualName, actualOffset)\n\t}\n}", "func compareOnChainAlloc(ctx context.Context, params *channel.Params, balances channel.Balances, assets []channel.Asset, cb *ethchannel.ContractBackend) error {\n\tonChain, err := getOnChainAllocation(ctx, cb, params, assets)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"getting on-chain allocation\")\n\t}\n\tfor a := range onChain {\n\t\tfor p := range onChain[a] {\n\t\t\tif balances[a][p].Cmp(onChain[a][p]) != 0 {\n\t\t\t\treturn errors.Errorf(\"balances[%d][%d] differ. Expected: %v, on-chain: %v\", a, p, balances[a][p], onChain[a][p])\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func Compare(expected, actual io.Reader) error {\n\texpScan := bufio.NewScanner(expected)\n\tactScan := bufio.NewScanner(actual)\n\n\tfor line := 1; ; line++ {\n\t\texp, hasExp := scanTrimRight(expScan)\n\t\tact, hasAct := scanTrimRight(actScan)\n\n\t\t// EOF at the same time\n\t\tif !hasExp && !hasAct {\n\t\t\treturn nil\n\t\t}\n\t\t// they are not equal\n\t\tif exp != act {\n\t\t\treturn newErr(line, exp, act)\n\t\t}\n\t\t// they are all exists and equal\n\t\tif hasExp && hasAct {\n\t\t\tcontinue\n\t\t}\n\t\t// verify all empty line lefts\n\t\tif err := verifyEOFSpace(\"actual\", actScan); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := verifyEOFSpace(\"expected\", expScan); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// at this point, they should all be same\n\t\treturn nil\n\t}\n}", "func DiffZones(canonical, comparison *Zone) *Zone {\n\tdiff := &Zone{\n\t\tServer: canonical.Server,\n\t\tARecords: map[string]net.IP{},\n\t\tCNAMERecords: map[string]string{},\n\t}\n\n\tcanonical.Lock()\n\tcomparison.Lock()\n\n\t// iterate over canonical map keys first\n\tfor name, target := range canonical.ARecords {\n\t\tcomparisonTarget, present := comparison.ARecords[name]\n\t\tif !present {\n\t\t\t// insert a deletion record\n\t\t\tdiff.ARecords[name] = SigilDeleteIP\n\t\t} else if 
!target.Equal(comparisonTarget) {\n\t\t\t// insert a change record\n\t\t\tdiff.ARecords[name] = comparisonTarget\n\t\t}\n\t}\n\tfor name, target := range canonical.CNAMERecords {\n\t\tcomparisonTarget, present := comparison.CNAMERecords[name]\n\t\tif !present {\n\t\t\t// insert a deletion record\n\t\t\tdiff.CNAMERecords[name] = \"\"\n\t\t} else if target != comparisonTarget {\n\t\t\t// insert a change record\n\t\t\tdiff.CNAMERecords[name] = comparisonTarget\n\t\t}\n\t}\n\t// iterate over comparison map keys to detect any additions\n\tfor name, target := range comparison.ARecords {\n\t\t_, present := canonical.ARecords[name]\n\t\tif !present {\n\t\t\t// insert an addition record\n\t\t\tdiff.ARecords[name] = target\n\t\t}\n\t}\n\tfor name, target := range comparison.CNAMERecords {\n\t\t_, present := canonical.CNAMERecords[name]\n\t\tif !present {\n\t\t\t// insert an addition record\n\t\t\tdiff.CNAMERecords[name] = target\n\t\t}\n\t}\n\n\tcanonical.Unlock()\n\tcomparison.Unlock()\n\n\treturn diff\n}", "func CompareResourceVersions(rvOld string, rvNew string) bool {\n\t// Ignore oldRV error as we care about new RV\n\ttempRvOld := ParseResourceVersion(rvOld)\n\ttempRvnew := ParseResourceVersion(rvNew)\n\treturn tempRvnew > tempRvOld\n}", "func (r *Resources) Equal(other *Resources) bool {\n\treturn equal(r.CPU, other.CPU) &&\n\t\tequal(r.MEMORY, other.MEMORY) &&\n\t\tequal(r.DISK, other.DISK) &&\n\t\tequal(r.GPU, other.GPU)\n}", "func AssertDiff(t *testing.T, diffs map[string]resource.Diff, name, original, current string) bool {\n\tvar ok bool\n\n\tif ok = assert.NotEmpty(t, diffs); !ok {\n\t\treturn false\n\t}\n\n\tif ok = assert.NotNil(t, diffs[name]); !ok {\n\t\treturn false\n\t}\n\n\tif ok = assert.Equal(t, original, diffs[name].Original()); !ok {\n\t\treturn false\n\t}\n\n\tif ok = assert.Equal(t, current, diffs[name].Current()); !ok {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func VerifyResources(resources string) error {\n\tif resources != \"\" {\n\t\tvar r 
map[string]interface{}\n\t\tif e := json.Unmarshal([]byte(resources), &r); e != nil {\n\t\t\treturn fmt.Errorf(\"%s: %s should be JSON format\", envResource, resources)\n\t\t}\n\t}\n\treturn nil\n}", "func compareDevices(expected *model.Device, actual *model.Device, t *testing.T) {\n\tassert.Equal(t, expected.Id, actual.Id)\n\tassert.Equal(t, expected.PubKey, actual.PubKey)\n\tassert.Equal(t, expected.IdData, actual.IdData)\n\tassert.Equal(t, expected.IdDataStruct, actual.IdDataStruct)\n\tassert.Equal(t, expected.IdDataSha256, actual.IdDataSha256)\n\tassert.Equal(t, expected.Status, actual.Status)\n\tassert.Equal(t, expected.ApiLimits, actual.ApiLimits)\n\tcompareTime(expected.CreatedTs, actual.CreatedTs, t)\n\tcompareTime(expected.UpdatedTs, actual.UpdatedTs, t)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CompareResourceList compares `expected` and `got` ResourceList respectively, and informs the caller if the two ResourceList are equal. Here `equal` means the same resources with the same quantities. Returns the different resource, the comparison result (same semantic as strings.Compare) and a boolean that reports if the resourceLists are consistent. The ResourceLists are consistent only if the represent the same resource set (all the resources listed in one are also present in the another; no ResourceList is a superset nor a subset of the other)
func CompareResourceList(expected, got corev1.ResourceList) (string, int, bool) { if len(got) != len(expected) { framework.Logf("-> expected=%v (len=%d) got=%v (len=%d)", expected, len(expected), got, len(got)) return "", 0, false } for expResName, expResQty := range expected { gotResQty, ok := got[expResName] if !ok { return string(expResName), 0, false } if cmp := gotResQty.Cmp(expResQty); cmp != 0 { framework.Logf("-> resource=%q cmp=%d expected=%v got=%v", expResName, cmp, expResQty, gotResQty) return string(expResName), cmp, true } } return "", 0, true }
[ "func TestResourceListSorting(t *testing.T) {\n\tsortedResourceList := make([]string, len(resourceList))\n\tcopy(sortedResourceList, resourceList)\n\tsort.Strings(sortedResourceList)\n\tfor i := 0; i < len(resourceList); i++ {\n\t\tif resourceList[i] != sortedResourceList[i] {\n\t\t\tt.Errorf(\"Expected resourceList[%d] = \\\"%s\\\", resourceList is not correctly sorted.\", i, sortedResourceList[i])\n\t\t\tbreak\n\t\t}\n\t}\n}", "func CmpResourceList(a, b *v1.ResourceList) bool {\n\treturn a.Cpu().Cmp(*b.Cpu()) == 0 &&\n\t\ta.Memory().Cmp(*b.Memory()) == 0 &&\n\t\tb.Pods().Cmp(*b.Pods()) == 0 &&\n\t\tb.StorageEphemeral().Cmp(*b.StorageEphemeral()) == 0\n}", "func CompareResources(resA, resB types.Resource) int {\n\tequal := cmp.Equal(resA, resB,\n\t\tignoreProtoXXXFields(),\n\t\tcmpopts.IgnoreFields(types.Metadata{}, \"ID\"),\n\t\tcmpopts.IgnoreFields(types.DatabaseV3{}, \"Status\"),\n\t\tcmpopts.EquateEmpty(),\n\t)\n\tif equal {\n\t\treturn Equal\n\t}\n\treturn Different\n}", "func CompareAllocatableResources(expected, got map[string]corev1.ResourceList) (string, string, int, bool) {\n\tif len(got) != len(expected) {\n\t\tframework.Logf(\"-> expected=%v (len=%d) got=%v (len=%d)\", expected, len(expected), got, len(got))\n\t\treturn \"\", \"\", 0, false\n\t}\n\tfor expZoneName, expResList := range expected {\n\t\tgotResList, ok := got[expZoneName]\n\t\tif !ok {\n\t\t\treturn expZoneName, \"\", 0, false\n\t\t}\n\t\tif resName, cmp, ok := CompareResourceList(expResList, gotResList); !ok || cmp != 0 {\n\t\t\treturn expZoneName, resName, cmp, ok\n\t\t}\n\t}\n\treturn \"\", \"\", 0, true\n}", "func CompareResourceVersions(rvOld string, rvNew string) bool {\n\t// Ignore oldRV error as we care about new RV\n\ttempRvOld := ParseResourceVersion(rvOld)\n\ttempRvnew := ParseResourceVersion(rvNew)\n\treturn tempRvnew > tempRvOld\n}", "func TestListResources(t *testing.T) {\n\n\tdefer gock.Off()\n\n\tmock := 
gock.New(\"https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net/config-gtm/v1/domains/\" + gtmTestDomain + \"/resources\")\n\tmock.\n\t\tGet(\"/config-gtm/v1/domains/\"+gtmTestDomain+\"/resources\").\n\t\tHeaderPresent(\"Authorization\").\n\t\tReply(200).\n\t\tSetHeader(\"Content-Type\", \"application/vnd.config-gtm.v1.3+json;charset=UTF-8\").\n\t\tBodyString(`{\n \"items\" : [ {\n \"aggregationType\" : \"median\",\n \"constrainedProperty\" : null,\n \"decayRate\" : null,\n \"description\" : null,\n \"hostHeader\" : null,\n \"leaderString\" : null,\n \"leastSquaresDecay\" : null,\n \"loadImbalancePercentage\" : null,\n \"maxUMultiplicativeIncrement\" : null,\n \"name\" : \"testResource\",\n \"resourceInstances\" : [ ],\n \"type\" : \"Download score\",\n \"upperBound\" : 0,\n \"links\" : [ {\n \"rel\" : \"self\",\n \"href\" : \"https://akaa-32qkzqewderdchot-d3uwbyqc4pqi2c5l.luna-dev.akamaiapis.net/config-gtm/v1/domains/gtmdomtest.akadns.net/resources/testResource\"\n } ]\n } ]\n }`)\n\n\tInit(config)\n\tresourceList, err := ListResources(gtmTestDomain)\n\tassert.NoError(t, err)\n\tassert.NotEqual(t, resourceList, nil)\n\n\tif len(resourceList) > 0 {\n\t\tfirstResource := resourceList[0]\n\t\tassert.Equal(t, firstResource.Name, GtmTestResource)\n\t} else {\n\t\tt.Fatal(\"List empty!\")\n\t}\n}", "func ResourcesEqual(a, b map[string]envoy.Resource) bool {\n\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor name, resource := range a {\n\t\tif !proto.Equal(resource, b[name]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (p *Plan) CompareResultSets(regressDir string, expectedDir string, t *tap.T) {\n\tfor i, rs := range p.ResultSets {\n\t\ttestName := strings.TrimPrefix(rs.Filename, regressDir+\"/out/\")\n\t\texpectedFilename := filepath.Join(expectedDir,\n\t\t\tfilepath.Base(rs.Filename))\n\t\tdiff, err := DiffFiles(expectedFilename, rs.Filename, 3)\n\n\t\tif err != nil {\n\t\t\tt.Diagnostic(\n\t\t\t\tfmt.Sprintf(`Query File: 
'%s'\nBindings File: '%s'\nBindings Name: '%s'\nQuery Parameters: '%v'\nExpected Result File: '%s'\nActual Result File: '%s'\n\nFailed to compare results: %s`,\n\t\t\t\t\tp.Query.Path,\n\t\t\t\t\tp.Path,\n\t\t\t\t\tp.Names[i],\n\t\t\t\t\tp.Bindings[i],\n\t\t\t\t\texpectedFilename,\n\t\t\t\t\trs.Filename,\n\t\t\t\t\terr.Error()))\n\t\t}\n\n\t\tif diff != \"\" {\n\t\t\tt.Diagnostic(\n\t\t\t\tfmt.Sprintf(`Query File: '%s'\nBindings File: '%s'\nBindings Name: '%s'\nQuery Parameters: '%v'\nExpected Result File: '%s'\nActual Result File: '%s'\n\n%s`,\n\t\t\t\t\tp.Query.Path,\n\t\t\t\t\tp.Path,\n\t\t\t\t\tp.Names[i],\n\t\t\t\t\tp.Bindings[i],\n\t\t\t\t\texpectedFilename,\n\t\t\t\t\trs.Filename,\n\t\t\t\t\tdiff))\n\t\t}\n\t\tt.Ok(diff == \"\", testName)\n\t}\n}", "func Test_APISpecification_not_same(t *testing.T) {\n\tvar as1, as2 *APISpecList\n\tasString1 := `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 := `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"2.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, true) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": \"http://mycompany.com/dm/gps2\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, true) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same.\", as1, 
as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":true,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, true) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"amd64\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, true) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"yourorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, true) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": 
\"http://mycompany.com/dm/gps2\",\"organization\":\"myorg\",\"version\":\"2.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, false) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same even ignoring version.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"2.0.0\",\"exclusiveAccess\":true,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, false) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same even ignoring version.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"2.0.0\",\"exclusiveAccess\":false,\"arch\":\"amd64\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif as1.IsSame(*as2, false) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are NOT the same even ignoring version.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func (r *Resources) Equal(other *Resources) bool {\n\treturn equal(r.CPU, other.CPU) &&\n\t\tequal(r.MEMORY, other.MEMORY) &&\n\t\tequal(r.DISK, other.DISK) &&\n\t\tequal(r.GPU, other.GPU)\n}", "func validateResourceList(resourceList core.ResourceList, upperBound core.ResourceList, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\tfor resourceName, quantity := 
range resourceList {\n\t\tresPath := fldPath.Key(string(resourceName))\n\t\t// Validate resource name.\n\t\tallErrs = append(allErrs, validateResourceName(&resourceName, resPath)...)\n\t\t// Validate resource quantity.\n\t\tallErrs = append(allErrs, corevalidation.ValidateResourceQuantityValue(string(resourceName), quantity, resPath)...)\n\t\tif upperBound != nil {\n\t\t\t// Check that request <= limit.\n\t\t\tupperBoundQuantity, exists := upperBound[resourceName]\n\t\t\tif exists && quantity.Cmp(upperBoundQuantity) > 0 {\n\t\t\t\tallErrs = append(allErrs, field.Invalid(fldPath, quantity.String(),\n\t\t\t\t\t\"must be less than or equal to the upper bound\"))\n\t\t\t}\n\t\t}\n\t}\n\treturn allErrs\n}", "func ExpectedResourceListFor(expectedISCount int64) kapi.ResourceList {\n\treturn kapi.ResourceList{\n\t\timageapi.ResourceImageStreams: *resource.NewQuantity(expectedISCount, resource.DecimalSI),\n\t}\n}", "func testCheckDDCloudAddressListMatches(name string, expected compute.IPAddressList) resource.TestCheckFunc {\n\tname = ensureResourceTypePrefix(name, \"ddcloud_address_list\")\n\n\treturn func(state *terraform.State) error {\n\t\tres, ok := state.RootModule().Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\taddressListID := res.Primary.ID\n\n\t\tclient := testAccProvider.Meta().(*providerState).Client()\n\t\taddressList, err := client.GetIPAddressList(addressListID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"bad: Get address list: %s\", err)\n\t\t}\n\t\tif addressList == nil {\n\t\t\treturn fmt.Errorf(\"bad: address list not found with Id '%s'\", addressListID)\n\t\t}\n\n\t\tif addressList.Name != expected.Name {\n\t\t\treturn fmt.Errorf(\"bad: address list '%s' has name '%s' (expected '%s')\", addressListID, addressList.Name, expected.Name)\n\t\t}\n\n\t\tif addressList.Description != expected.Description {\n\t\t\treturn fmt.Errorf(\"bad: address list '%s' has description '%s' (expected '%s')\", addressListID, 
addressList.Description, expected.Description)\n\t\t}\n\n\t\tif len(addressList.Addresses) != len(expected.Addresses) {\n\t\t\treturn fmt.Errorf(\"bad: address list '%s' has %d addresses or address-ranges (expected '%d')\", addressListID, len(addressList.Addresses), len(expected.Addresses))\n\t\t}\n\n\t\terr = compareAddressListEntries(expected, *addressList)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(addressList.ChildLists) != len(expected.ChildLists) {\n\t\t\treturn fmt.Errorf(\"bad: address list '%s' has %d child lists (expected '%d')\", addressListID, len(addressList.ChildLists), len(expected.ChildLists))\n\t\t}\n\n\t\tfor index := range addressList.ChildLists {\n\t\t\texpectedChildListID := expected.ChildLists[index].ID\n\t\t\tactualChildListID := addressList.ChildLists[index].ID\n\n\t\t\tif actualChildListID != expectedChildListID {\n\t\t\t\treturn fmt.Errorf(\"bad: address list '%s' has child list at index %d with Id %s (expected '%s')\",\n\t\t\t\t\taddressListID, index, actualChildListID, expectedChildListID,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func Test_APISpecification_same(t *testing.T) {\n\tvar as1, as2 *APISpecList\n\tasString1 := `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 := `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif !as1.IsSame(*as2, true) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are the same.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n\tasString1 = `[{\"specRef\": \"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"1.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\tasString2 = `[{\"specRef\": 
\"http://mycompany.com/dm/gps\",\"organization\":\"myorg\",\"version\":\"2.0.0\",\"exclusiveAccess\":false,\"arch\":\"arm\"}]`\n\n\tif as1 = create_APISpecification(asString1, t); as1 != nil {\n\t\tif as2 = create_APISpecification(asString2, t); as2 != nil {\n\t\t\tif !as1.IsSame(*as2, false) {\n\t\t\t\tt.Errorf(\"Error: %v and %v are the same ignoring version.\", as1, as2)\n\t\t\t}\n\t\t}\n\t}\n\n}", "func assertEqualEndpointLists(t *testing.T, expected, actual []*Endpoint) {\n\texpectedSet := map[string]*Endpoint{}\n\tfor _, ep := range expected {\n\t\tuid, found := ep.getSingleValuedAttrs()[DestinationUID.AttrName()]\n\t\tif !found {\n\t\t\tt.Fatalf(\"expected ep found with no UID is an indication of bad test data: '%v'\", ep)\n\t\t}\n\t\texpectedSet[uid] = ep\n\t}\n\tactualSet := map[string]*Endpoint{}\n\tfor _, ep := range actual {\n\t\tuid, found := ep.getSingleValuedAttrs()[DestinationUID.AttrName()]\n\t\tif !found {\n\t\t\tt.Errorf(\"actual ep found with no UID '%s'\", epDebugInfo(ep))\n\t\t\tcontinue\n\t\t}\n\t\tactualSet[uid] = ep\n\t}\n\tfor uid, expectedEp := range expectedSet {\n\t\tactualEp, found := actualSet[uid]\n\t\tif !found {\n\t\t\tt.Errorf(\"expecting endpoint\\nShortForm: %s\\nLongForm : %s\\nfound none\", epDebugInfo(expectedEp), *expectedEp)\n\t\t\tcontinue\n\t\t}\n\t\tassertEqualEndpoints(t, expectedEp, actualEp)\n\t\tdelete(actualSet, uid)\n\t}\n\tfor _, ep := range actualSet {\n\t\tt.Errorf(\"unexpected endpoint found: %s\", epDebugInfo(ep))\n\t}\n\tif len(expected) != len(actual) {\n\t\tt.Errorf(\"expected endpoint count: %d do not tally with actual count: %d\", len(expected), len(actual))\n\t}\n}", "func CompareIPLists(a []string, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn true\n\t}\n\n\tsort.Strings(a)\n\tsort.Strings(b)\n\n\tif !reflect.DeepEqual(a, b) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *Compare) Compare() (map[string][]schema.GroupVersionResource, error) {\n\tpreferredSrcResourceList, err := 
collectPreferredResources(r.SrcDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrcCRDResource, err := collectPreferredCRDResource(r.SrcDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdstResourceList, err := collectNamespacedResources(r.DstDiscovery)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpreferredSrcResourceList, err = r.excludeCRDs(preferredSrcResourceList, srcCRDResource, r.SrcClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresourcesDiff := r.compareResources(preferredSrcResourceList, dstResourceList)\n\tincompatibleGVKs, err := convertToGVRList(resourcesDiff)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Don't report an incompatibleGVK if user settings will skip resource anyways\n\texcludedResources := toStringSlice(settings.ExcludedInitialResources.Union(toSet(r.Plan.Status.ExcludedResources)))\n\tfilteredGVKs := []schema.GroupVersionResource{}\n\tfor _, gvr := range incompatibleGVKs {\n\t\tskip := false\n\t\tfor _, resource := range excludedResources {\n\t\t\tif strings.EqualFold(gvr.Resource, resource) {\n\t\t\t\tskip = true\n\t\t\t}\n\t\t}\n\t\tif !skip {\n\t\t\tfilteredGVKs = append(filteredGVKs, gvr)\n\t\t}\n\t}\n\n\treturn r.collectIncompatibleMapping(filteredGVKs)\n}", "func maxResourceList(list, new corev1.ResourceList) {\n\tfor name, quantity := range new {\n\t\tif value, ok := list[name]; !ok {\n\t\t\tlist[name] = quantity.DeepCopy()\n\t\t\tcontinue\n\t\t} else {\n\t\t\tif quantity.Cmp(value) > 0 {\n\t\t\t\tlist[name] = quantity.DeepCopy()\n\t\t\t}\n\t\t}\n\t}\n}", "func testCheckDDCloudPortListMatches(name string, expected compute.PortList) resource.TestCheckFunc {\n\tname = ensureResourceTypePrefix(name, \"ddcloud_port_list\")\n\n\treturn func(state *terraform.State) error {\n\t\tres, ok := state.RootModule().Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"not found: %s\", name)\n\t\t}\n\n\t\tportListID := res.Primary.ID\n\n\t\tclient := 
testAccProvider.Meta().(*providerState).Client()\n\t\tportList, err := client.GetPortList(portListID)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"bad: get port list: %s\", err)\n\t\t}\n\t\tif portList == nil {\n\t\t\treturn fmt.Errorf(\"bad: port list not found with Id '%s'\", portListID)\n\t\t}\n\n\t\tif portList.Name != expected.Name {\n\t\t\treturn fmt.Errorf(\"bad: port list '%s' has name '%s' (expected '%s')\", portListID, portList.Name, expected.Name)\n\t\t}\n\n\t\tif portList.Description != expected.Description {\n\t\t\treturn fmt.Errorf(\"bad: port list '%s' has description '%s' (expected '%s')\", portListID, portList.Description, expected.Description)\n\t\t}\n\n\t\tif len(portList.Ports) != len(expected.Ports) {\n\t\t\treturn fmt.Errorf(\"bad: port list '%s' has %d ports or port ranges (expected '%d')\", portListID, len(portList.Ports), len(expected.Ports))\n\t\t}\n\n\t\terr = comparePortListEntries(expected, *portList)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(portList.ChildLists) != len(expected.ChildLists) {\n\t\t\treturn fmt.Errorf(\"bad: port list '%s' has %d child lists (expected '%d')\", portListID, len(portList.ChildLists), len(expected.ChildLists))\n\t\t}\n\n\t\tfor index := range portList.ChildLists {\n\t\t\texpectedChildListID := expected.ChildLists[index].ID\n\t\t\tactualChildListID := portList.ChildLists[index].ID\n\n\t\t\tif actualChildListID != expectedChildListID {\n\t\t\t\treturn fmt.Errorf(\"bad: port list '%s' has child list at index %d with Id %s (expected '%s')\",\n\t\t\t\t\tportListID, index, actualChildListID, expectedChildListID,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsValidNodeTopology checks the provided NodeResourceTopology object if it is wellformad, internally consistent and consistent with the given kubelet config object. Returns true if the NodeResourceTopology object is consistent and well formet, false otherwise; if return false, logs the failure reason.
func IsValidNodeTopology(nodeTopology *v1alpha2.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool { if nodeTopology == nil || len(nodeTopology.TopologyPolicies) == 0 { framework.Logf("failed to get topology policy from the node topology resource") return false } tmPolicy := string(topologypolicy.DetectTopologyPolicy(kubeletConfig.TopologyManagerPolicy, kubeletConfig.TopologyManagerScope)) if nodeTopology.TopologyPolicies[0] != tmPolicy { framework.Logf("topology policy mismatch got %q expected %q", nodeTopology.TopologyPolicies[0], tmPolicy) return false } expectedPolicyAttribute := v1alpha2.AttributeInfo{ Name: nfdtopologyupdater.TopologyManagerPolicyAttributeName, Value: kubeletConfig.TopologyManagerPolicy, } if !containsAttribute(nodeTopology.Attributes, expectedPolicyAttribute) { framework.Logf("topology policy attributes don't have correct topologyManagerPolicy attribute expected %v attributeList %v", expectedPolicyAttribute, nodeTopology.Attributes) return false } expectedScopeAttribute := v1alpha2.AttributeInfo{ Name: nfdtopologyupdater.TopologyManagerScopeAttributeName, Value: kubeletConfig.TopologyManagerScope, } if !containsAttribute(nodeTopology.Attributes, expectedScopeAttribute) { framework.Logf("topology policy attributes don't have correct topologyManagerScope attribute expected %v attributeList %v", expectedScopeAttribute, nodeTopology.Attributes) return false } if nodeTopology.Zones == nil || len(nodeTopology.Zones) == 0 { framework.Logf("failed to get topology zones from the node topology resource") return false } foundNodes := 0 for _, zone := range nodeTopology.Zones { // TODO constant not in the APIs if !strings.HasPrefix(strings.ToUpper(zone.Type), "NODE") { continue } foundNodes++ if !isValidCostList(zone.Name, zone.Costs) { framework.Logf("invalid cost list for zone %q", zone.Name) return false } if !isValidResourceList(zone.Name, zone.Resources) { framework.Logf("invalid resource list for zone %q", zone.Name) 
return false } } return foundNodes > 0 }
[ "func nodeIsValidForTopologyAwareHints(node *corev1.Node) bool {\n\treturn !node.Status.Allocatable.Cpu().IsZero() && node.Labels[corev1.LabelTopologyZone] != \"\"\n}", "func (t Topology) Validate() error {\n\terrs := []string{}\n\n\t// Check all node metadatas are valid, and the keys are parseable, i.e.\n\t// contain a scope.\n\tfor nodeID, nmd := range t.Nodes {\n\t\tif nmd.Metadata == nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"node ID %q has nil metadata\", nodeID))\n\t\t}\n\t\tif _, _, ok := ParseNodeID(nodeID); !ok {\n\t\t\terrs = append(errs, fmt.Sprintf(\"invalid node ID %q\", nodeID))\n\t\t}\n\n\t\t// Check all adjancency keys has entries in Node.\n\t\tfor _, dstNodeID := range nmd.Adjacency {\n\t\t\tif _, ok := t.Nodes[dstNodeID]; !ok {\n\t\t\t\terrs = append(errs, fmt.Sprintf(\"node metadata missing from adjacency %q -> %q\", nodeID, dstNodeID))\n\t\t\t}\n\t\t}\n\n\t\t// Check all the edge metadatas have entries in adjacencies\n\t\tfor dstNodeID := range nmd.Edges {\n\t\t\tif _, ok := t.Nodes[dstNodeID]; !ok {\n\t\t\t\terrs = append(errs, fmt.Sprintf(\"node %s metadatas missing for edge %q\", dstNodeID, nodeID))\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"%d error(s): %s\", len(errs), strings.Join(errs, \"; \"))\n\t}\n\n\treturn nil\n}", "func (d *portworx) ValidateVolumeTopology(vol *api.Volume, params map[string]string) error {\n\tvar topoMatches bool\n\tvar err error\n\tzone := params[torpedok8s.TopologyZoneK8sNodeLabel]\n\tnodes := node.GetNodesByTopologyZoneLabel(zone)\n\tfor _, node := range nodes {\n\t\tif topoMatches, err = d.isVolumeAttachedOnNode(vol, node); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif topoMatches {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn &ErrCsiTopologyMismatch{\n\t\tVolName: vol.Locator.Name,\n\t\tCause: fmt.Errorf(\"volume [%s] is not attched on nodes with topology label [%s]\", vol.Id, zone),\n\t}\n}", "func (m *NodeTopo) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif 
err := m.validateBrickTopo(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCluster(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateClusterTopo(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHost(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLocalID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePosition(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (topo Topology) Validate() error {\n\tif topo.NPopulations < 1 {\n\t\treturn errors.New(\"'NPopulations' should be higher or equal to 1\")\n\t}\n\tif topo.NClusters < 0 {\n\t\treturn errors.New(\"'NClusters' should be higher or equal to 1 if provided\")\n\t}\n\tif topo.NIndividuals < 1 {\n\t\treturn errors.New(\"'NIndividuals' should be higher or equal to 1\")\n\t}\n\treturn nil\n}", "func (c *CLab) CheckTopologyDefinition(ctx context.Context) error {\n\tvar err error\n\tif err = c.verifyBridgesExist(); err != nil {\n\t\treturn err\n\t}\n\tif err = c.verifyLinks(); err != nil {\n\t\treturn err\n\t}\n\tif err = c.verifyRootNetnsInterfaceUniqueness(); err != nil {\n\t\treturn err\n\t}\n\tif err = c.VerifyContainersUniqueness(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err = c.verifyVirtSupport(); err != nil {\n\t\treturn err\n\t}\n\tif err = c.verifyHostIfaces(); err != nil {\n\t\treturn err\n\t}\n\treturn c.VerifyImages(ctx)\n}", "func (ns NodeSolver) Validate() bool {\n\treturn config.ValidateNodeSolver(&ns)\n}", "func ValidateNodeSolver(ns RextNodeSolver) (hasError bool) {\n\tif len(ns.GetNodePath()) == 0 {\n\t\thasError = true\n\t\tlog.Errorln(\"node path is required in 
node solver config\")\n\t}\n\treturn hasError\n}", "func GetNodeTopology(ctx context.Context, topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {\n\tvar nodeTopology *v1alpha2.NodeResourceTopology\n\tvar err error\n\tgomega.EventuallyWithOffset(1, func() bool {\n\t\tnodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(ctx, nodeName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tframework.Logf(\"failed to get the node topology resource: %v\", err)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}, time.Minute, 5*time.Second).Should(gomega.BeTrue())\n\treturn nodeTopology\n}", "func IsNodeHealthy() bool {\n\t// Synced\n\tif !tangle.IsNodeSyncedWithThreshold() {\n\t\treturn false\n\t}\n\n\t// Has connected neighbors\n\tif peering.Manager().ConnectedPeerCount() == 0 {\n\t\treturn false\n\t}\n\n\t// Latest milestone timestamp\n\tvar milestoneTimestamp int64\n\tlmi := tangle.GetLatestMilestoneIndex()\n\tcachedLatestMs := tangle.GetMilestoneOrNil(lmi) // bundle +1\n\tif cachedLatestMs == nil {\n\t\treturn false\n\t}\n\n\tcachedMsTailTx := cachedLatestMs.GetBundle().GetTail() // tx +1\n\tmilestoneTimestamp = cachedMsTailTx.GetTransaction().GetTimestamp()\n\tcachedMsTailTx.Release(true) // tx -1\n\tcachedLatestMs.Release(true) // bundle -1\n\n\t// Check whether the milestone is older than 5 minutes\n\ttimeMs := time.Unix(milestoneTimestamp, 0)\n\treturn time.Since(timeMs) < maxAllowedMilestoneAge\n}", "func (r *reconciler) shouldEnableTopologyAwareHints(dns *operatorv1.DNS) (bool, error) {\n\tvar nodesList corev1.NodeList\n\tif err := r.cache.List(context.TODO(), &nodesList); err != nil {\n\t\treturn false, err\n\t}\n\tnodes := 0\n\tfor i := range nodesList.Items {\n\t\tif ignoreNodeForTopologyAwareHints(&nodesList.Items[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tif !nodeIsValidForTopologyAwareHints(&nodesList.Items[i]) {\n\t\t\treturn false, nil\n\t\t}\n\t\tnodes++\n\t}\n\n\treturn nodes >= 2, nil\n}", "func 
(m *NodeTopoHost) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (_Staking *StakingCaller) IsValidStakingNode(opts *bind.CallOpts, nodeAddr common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Staking.contract.Call(opts, out, \"isValidStakingNode\", nodeAddr)\n\treturn *ret0, err\n}", "func (t *Tainter) IsNodeTainted(ctx context.Context) (bool, error) {\n\tnode, err := t.client.CoreV1().Nodes().Get(ctx, t.nodeName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor i := range node.Spec.Taints {\n\t\tif node.Spec.Taints[i].Key == TaintKey {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}", "func (_Staking *StakingCallerSession) IsValidStakingNode(nodeAddr common.Address) (bool, error) {\n\treturn _Staking.Contract.IsValidStakingNode(&_Staking.CallOpts, nodeAddr)\n}", "func ValidateHost(host string, config *Config) error {\n\t_, ok := config.Topology[host]\n\tif !ok {\n\t\treturn fmt.Errorf(\"unable to find info for host %q in config file\", host)\n\t}\n\treturn nil\n}", "func (m *NodeMounter) IsCorruptedMnt(err error) bool {\n\treturn mount.IsCorruptedMnt(err)\n}", "func (s *NodeSystem) IsValid() (bool, []error) {\n\terrors := make([]error, 0)\n\terrors = append(errors, checkForOrphanMultiBranchesNode(s)...)\n\terrors = append(errors, checkForCyclicRedundancyInNodeLinks(s)...)\n\terrors = append(errors, checkForUndeclaredNodeInNodeLink(s)...)\n\terrors = append(errors, checkForMultipleInstanceOfSameNode(s)...)\n\terrors = append(errors, checkForMultipleLinksToNodeWithoutJoinMode(s)...)\n\n\tif len(errors) == 0 {\n\t\treturn true, nil\n\t}\n\treturn false, errors\n}", "func CheckTopologyChanges(rcc 
*CassandraClusterReconciler, cc *api.CassandraCluster,\n\tstatus *api.CassandraClusterStatus, oldCRD *api.CassandraCluster) (bool, string) {\n\n\tchangelog, _ := diff.Diff(oldCRD.Spec.Topology, cc.Spec.Topology)\n\n\tif hasChange(changelog, diff.UPDATE) ||\n\t\thasChange(changelog, diff.DELETE, \"DC.Rack\", \"-DC\") ||\n\t\thasChange(changelog, diff.CREATE, \"DC.Rack\", \"-DC\") {\n\t\tlogrus.WithFields(logrus.Fields{\"cluster\": cc.Name}).Warningf(\n\t\t\ttopologyChangeRefused+\"No change other than adding/removing a DC can happen: %v restored to %v\",\n\t\t\tcc.Spec.Topology, oldCRD.Spec.Topology)\n\t\treturn true, api.ActionCorrectCRDConfig.Name\n\t}\n\n\tif cc.GetDCSize() < oldCRD.GetDCSize()-1 {\n\t\tlogrus.WithFields(logrus.Fields{\"cluster\": cc.Name}).Warningf(\n\t\t\ttopologyChangeRefused+\"You can only remove 1 DC at a time, \"+\n\t\t\t\t\"not only a Rack: %v restored to %v\", cc.Spec.Topology, oldCRD.Spec.Topology)\n\t\treturn true, api.ActionCorrectCRDConfig.Name\n\t}\n\n\tif cc.GetDCRackSize() < oldCRD.GetDCRackSize() {\n\n\t\tif cc.Status.LastClusterAction == api.ActionScaleDown.Name &&\n\t\t\tcc.Status.LastClusterActionStatus != api.StatusDone {\n\t\t\tlogrus.WithFields(logrus.Fields{\"cluster\": cc.Name}).\n\t\t\t\tWarningf(topologyChangeRefused +\n\t\t\t\t\t\"You must wait to the end of ScaleDown to 0 before deleting a DC\")\n\t\t\treturn true, api.ActionCorrectCRDConfig.Name\n\t\t}\n\n\t\tdcName := cc.GetRemovedDCName(oldCRD)\n\n\t\t//We need to check how many nodes were in the old CRD (before the user delete it)\n\t\tif found, nbNodes := oldCRD.GetDCNodesPerRacksFromName(dcName); found && nbNodes > 0 {\n\t\t\tlogrus.WithFields(logrus.Fields{\"cluster\": cc.Name}).\n\t\t\t\tWarningf(topologyChangeRefused+\n\t\t\t\t\t\"You must scale down the DC %s to 0 before deleting it\", dcName)\n\t\t\treturn true, api.ActionCorrectCRDConfig.Name\n\t\t}\n\n\t\tlogrus.WithFields(logrus.Fields{\"cluster\": cc.Name}).Warningf(\"Removing DC %s\", 
dcName)\n\n\t\t//We apply this change to the Cluster status\n\t\treturn rcc.deleteDCObjects(cc, status)\n\t}\n\n\treturn false, \"\"\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ParseSoftwareAttribute parses the bytes into a SoftwareAttribute instance.
func ParseSoftwareAttribute(r *read.BigEndian, l uint16) (SoftwareAttribute, error) { sw, err := Read127CharString(r, l) return SoftwareAttribute{sw}, err }
[ "func ParseAttribute(b []byte) (Attribute, error) {\n\tif len(b) < 22 {\n\t\treturn Attribute{}, fmt.Errorf(\"attribute data should be at least 22 bytes but is %d\", len(b))\n\t}\n\n\tr := binutil.NewLittleEndianReader(b)\n\n\tnameLength := r.Byte(0x09)\n\tnameOffset := r.Uint16(0x0A)\n\n\tname := \"\"\n\tif nameLength != 0 {\n\t\tnameBytes := r.Read(int(nameOffset), int(nameLength)*2)\n\t\tname = utf16.DecodeString(nameBytes, binary.LittleEndian)\n\t}\n\n\tresident := r.Byte(0x08) == 0x00\n\tvar attributeData []byte\n\tactualSize := uint64(0)\n\tallocatedSize := uint64(0)\n\tif resident {\n\t\tdataOffset := int(r.Uint16(0x14))\n\t\tuDataLength := r.Uint32(0x10)\n\t\tif int64(uDataLength) > maxInt {\n\t\t\treturn Attribute{}, fmt.Errorf(\"attribute data length %d overflows maximum int value %d\", uDataLength, maxInt)\n\t\t}\n\t\tdataLength := int(uDataLength)\n\t\texpectedDataLength := dataOffset + dataLength\n\n\t\tif len(b) < expectedDataLength {\n\t\t\treturn Attribute{}, fmt.Errorf(\"expected attribute data length to be at least %d but is %d\", expectedDataLength, len(b))\n\t\t}\n\n\t\tattributeData = r.Read(dataOffset, dataLength)\n\t} else {\n\t\tdataOffset := int(r.Uint16(0x20))\n\t\tif len(b) < dataOffset {\n\t\t\treturn Attribute{}, fmt.Errorf(\"expected attribute data length to be at least %d but is %d\", dataOffset, len(b))\n\t\t}\n\t\tallocatedSize = r.Uint64(0x28)\n\t\tactualSize = r.Uint64(0x30)\n\t\tattributeData = r.ReadFrom(int(dataOffset))\n\t}\n\n\treturn Attribute{\n\t\tType: AttributeType(r.Uint32(0)),\n\t\tResident: resident,\n\t\tName: name,\n\t\tFlags: AttributeFlags(r.Uint16(0x0C)),\n\t\tAttributeId: int(r.Uint16(0x0E)),\n\t\tAllocatedSize: allocatedSize,\n\t\tActualSize: actualSize,\n\t\tData: binutil.Duplicate(attributeData),\n\t}, nil\n}", "func (c *StickersCreateStickerSetRequest) SetSoftware(value string) {\n\tc.Flags.Set(3)\n\tc.Software = value\n}", "func parseAttribute(line string) Attribute {\n\tif !strings.HasPrefix(line, 
AttributeCommentPrefix) {\n\t\treturn nil\n\t}\n\tline = strings.TrimSpace(line[len(AttributeCommentPrefix):])\n\tcolon := strings.IndexRune(line, ':')\n\tvar key, value string\n\tif colon == -1 {\n\t\tkey = line\n\t} else {\n\t\tkey, value = line[:colon], line[colon+1:]\n\t}\n\tswitch key {\n\tcase \"linkage\":\n\t\treturn parseLinkageAttribute(value)\n\tcase \"name\":\n\t\treturn nameAttribute(strings.TrimSpace(value))\n\tcase \"thread_local\":\n\t\treturn tlsAttribute{}\n\tdefault:\n\t\t// FIXME decide what to do here. return error? log warning?\n\t\tpanic(\"unknown attribute key: \" + key)\n\t}\n\treturn nil\n}", "func (o *PostUpdateServicePortsParams) SetSoftware(software string) {\n\to.Software = software\n}", "func ParseAttributes(b []byte) ([]Attribute, error) {\n\tif len(b) == 0 {\n\t\treturn []Attribute{}, nil\n\t}\n\tattributes := make([]Attribute, 0)\n\tfor len(b) > 0 {\n\t\tif len(b) < 4 {\n\t\t\treturn nil, fmt.Errorf(\"attribute header data should be at least 4 bytes but is %d\", len(b))\n\t\t}\n\n\t\tr := binutil.NewLittleEndianReader(b)\n\t\tattrType := r.Uint32(0)\n\t\tif attrType == uint32(AttributeTypeTerminator) {\n\t\t\tbreak\n\t\t}\n\n\t\tif len(b) < 8 {\n\t\t\treturn nil, fmt.Errorf(\"cannot read attribute header record length, data should be at least 8 bytes but is %d\", len(b))\n\t\t}\n\n\t\tuRecordLength := r.Uint32(0x04)\n\t\tif int64(uRecordLength) > maxInt {\n\t\t\treturn nil, fmt.Errorf(\"record length %d overflows maximum int value %d\", uRecordLength, maxInt)\n\t\t}\n\t\trecordLength := int(uRecordLength)\n\t\tif recordLength <= 0 {\n\t\t\treturn nil, fmt.Errorf(\"cannot handle attribute with zero or negative record length %d\", recordLength)\n\t\t}\n\n\t\tif recordLength > len(b) {\n\t\t\treturn nil, fmt.Errorf(\"attribute record length %d exceeds data length %d\", recordLength, len(b))\n\t\t}\n\n\t\trecordData := r.Read(0, recordLength)\n\t\tattribute, err := ParseAttribute(recordData)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tattributes = append(attributes, attribute)\n\t\tb = r.ReadFrom(recordLength)\n\t}\n\treturn attributes, nil\n}", "func parseFamily(b []byte) (Family, error) {\n\tad, err := netlink.NewAttributeDecoder(b)\n\tif err != nil {\n\t\treturn Family{}, err\n\t}\n\n\tvar f Family\n\tfor ad.Next() {\n\t\tswitch ad.Type() {\n\t\tcase unix.CTRL_ATTR_FAMILY_ID:\n\t\t\tf.ID = ad.Uint16()\n\t\tcase unix.CTRL_ATTR_FAMILY_NAME:\n\t\t\tf.Name = ad.String()\n\t\tcase unix.CTRL_ATTR_VERSION:\n\t\t\tv := ad.Uint32()\n\t\t\tif v > math.MaxUint8 {\n\t\t\t\treturn Family{}, errInvalidFamilyVersion\n\t\t\t}\n\n\t\t\tf.Version = uint8(v)\n\t\tcase unix.CTRL_ATTR_MCAST_GROUPS:\n\t\t\tad.Nested(parseMulticastGroups(&f.Groups))\n\t\t}\n\t}\n\n\tif err := ad.Err(); err != nil {\n\t\treturn Family{}, err\n\t}\n\n\treturn f, nil\n}", "func parseStationInfo(b []byte) (*StationInfo, error) {\n\tattrs, err := netlink.UnmarshalAttributes(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar info StationInfo\n\tfor _, a := range attrs {\n\t\tswitch a.Type {\n\t\tcase unix.NL80211_ATTR_MAC:\n\t\t\tinfo.HardwareAddr = net.HardwareAddr(a.Data)\n\t\tcase unix.NL80211_ATTR_STA_INFO:\n\t\t\tnattrs, err := netlink.UnmarshalAttributes(a.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err := (&info).parseAttributes(nattrs); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Parsed the necessary data.\n\t\t\treturn &info, nil\n\t\t}\n\t}\n\n\t// No station info found\n\treturn nil, os.ErrNotExist\n}", "func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {\n\tsoftIRQStat := SoftIRQStat{}\n\tvar total uint64\n\tvar prefix string\n\n\t_, err := fmt.Sscanf(line, \"%s %d %d %d %d %d %d %d %d %d %d %d\",\n\t\t&prefix, &total,\n\t\t&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,\n\t\t&softIRQStat.Block, &softIRQStat.BlockIoPoll,\n\t\t&softIRQStat.Tasklet, &softIRQStat.Sched,\n\t\t&softIRQStat.Hrtimer, 
&softIRQStat.Rcu)\n\n\tif err != nil {\n\t\treturn SoftIRQStat{}, 0, fmt.Errorf(\"couldn't parse %q (softirq): %w\", line, err)\n\t}\n\n\treturn softIRQStat, total, nil\n}", "func (s *DatabaseResponse) SetSoftwareDetails(v *DatabaseInstanceSoftwareDetailsResponse) *DatabaseResponse {\n\ts.SoftwareDetails = v\n\treturn s\n}", "func NewSoftware(software string) Software {\n\treturn Software(software)\n}", "func ManifestFromBytes(body []byte) (*manifest.A2, error) {\n\t// Try an A2 manifest. If it doesn't have a BuildSHA or returns an error\n\t// then try a versioned manifest.\n\ta2, _ := parseA2Manifest(body)\n\tif a2 != nil && a2.BuildSHA != \"\" {\n\t\treturn a2, nil\n\t}\n\n\tversionedManifest := &versionedManifest{}\n\terr := json.Unmarshal(body, versionedManifest)\n\tif err != nil {\n\t\treturn nil, manifest.NewInvalidSchemaError(err)\n\t}\n\n\tver := versionedManifest.SchemaVersion\n\tswitch ver {\n\tcase \"1\", \"2\":\n\t\treturn parseV1Manifest(body)\n\tdefault:\n\t\treturn nil, manifest.NewInvalidSchemaError(errors.Errorf(\"schema version unknown: %s\", ver))\n\t}\n}", "func parseRelease(release string) (*KernelVersion, error) {\n\tversion := KernelVersion{}\n\n\t// We're only make sure we get the \"kernel\" and \"major revision\". 
Sometimes we have\n\t// 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64.\n\t_, err := fmt.Sscanf(release, \"%d.%d\", &version.Kernel, &version.Major)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse kernel version %q: %w\", release, err)\n\t}\n\treturn &version, nil\n}", "func (removexattr *FuseRemovexattrIn) ParseBinary(bcontent []byte) error {\n\n\tlength := len(bcontent)\n\tif length > 0 {\n\t\tremovexattr.Name = string(bcontent[:length-1])\n\t}\n\n\treturn nil\n}", "func (i SNSPlatformApplicationAttribute) ParseByName(s string) (SNSPlatformApplicationAttribute, error) {\n\tif val, ok := _SNSPlatformApplicationAttributeNameToValueMap[s]; ok {\n\t\t// parse ok\n\t\treturn val, nil\n\t}\n\n\t// error\n\treturn -1, fmt.Errorf(\"Enum Name of %s Not Expected In SNSPlatformApplicationAttribute Values List\", s)\n}", "func DecodeAttribute(b []byte) (*Attribute, []byte, error) {\n\tif len(b) < SizeofRtAttr {\n\t\treturn nil, nil, netlink.ErrNoData\n\t}\n\n\tlength := *(*uint16)(unsafe.Pointer(&b[0:2][0]))\n\tif uint16(len(b)) < length ||\n\t\tlength < SizeofRtAttr {\n\t\treturn nil, b, netlink.ErrNoData\n\t}\n\n\ta := &Attribute{}\n\ta.Type = *(*uint16)(unsafe.Pointer(&b[2:4][0]))\n\tdata_len := int(length) - RTA_STRUCT_ALEN\n\ta.data = make([]byte, data_len)\n\tcopy(a.data, b[RTA_STRUCT_ALEN:length])\n\n\tr := netlink.Align(int(length), RTA_ALIGNTO)\n\treturn a, b[r:], nil\n}", "func (attr *Attribute) UnmarshalBinary(data []byte) error {\n\t// Read the common portion of the attribute record header\n\tif err := attr.Header.UnmarshalBinary(data); err != nil {\n\t\treturn err\n\t}\n\n\t// Sanity check the record length\n\tif int(attr.Header.RecordLength) > len(data) {\n\t\treturn ErrTruncatedData\n\t}\n\n\t// For the sake of simple bounds checking below, restrict our working\n\t// data set to this record\n\tif len(data) > int(attr.Header.RecordLength) {\n\t\tdata = data[:attr.Header.RecordLength]\n\t}\n\n\t// Read the form-specific portion of the 
attribute record header\n\tformHeader := data[AttributeRecordHeaderLength:]\n\tif attr.Header.Resident() {\n\t\tif err := attr.Resident.UnmarshalBinary(formHeader); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := attr.Nonresident.UnmarshalBinary(formHeader); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Read the attribute name if it has one\n\tif attr.Header.NameLength > 0 {\n\t\tstart := int(attr.Header.NameOffset)\n\t\tlength := int(attr.Header.NameLength) * 2 // NameLength is in characters, assuming 16-bit unicode\n\t\tend := start + length\n\t\tif end > len(data) {\n\t\t\treturn ErrAttributeNameOutOfBounds\n\t\t}\n\t\tvar err error\n\t\tattr.Name, err = utf16ToString(data[start:end])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Read the attribute value if it's resident and nonzero\n\tif attr.Header.Resident() && attr.Resident.ValueLength > 0 {\n\t\tstart := int(attr.Resident.ValueOffset)\n\t\tlength := int(attr.Resident.ValueLength)\n\t\tend := start + length\n\t\tif end > len(data) {\n\t\t\treturn ErrAttributeValueOutOfBounds\n\t\t}\n\t\tattr.ResidentValue = make([]byte, length)\n\t\tcopy(attr.ResidentValue, data[start:end])\n\t}\n\n\treturn nil\n}", "func ParseAttribute(v []byte, c *Candidate) error {\n\tp := candidateParser{\n\t\tbuf: v,\n\t\tc: c,\n\t}\n\terr := p.parse()\n\treturn err\n}", "func (getattr *FuseGetattrIn) ParseBinary(bcontent []byte) error {\n\terr := common.ParseBinary(bcontent, getattr)\n\n\treturn err\n}", "func (setxattr *FuseSetxattrIn) ParseBinary(bcontent []byte) error {\n\n\tlength := len(bcontent)\n\n\tif length < 8 {\n\t\treturn ErrDataLen\n\t}\n\n\tcommon.ParseBinary(bcontent[:4], &setxattr.Size)\n\tcommon.ParseBinary(bcontent[4:8], &setxattr.Flags)\n\n\tarray := bytes.Split(bcontent[8:], []byte{0})\n\n\tif len(array) < 2 {\n\t\treturn ErrDataLen\n\t}\n\n\tsetxattr.Name = string(array[0])\n\tsetxattr.Value = string(array[1])\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetGlobalBalancer set grpc balancer with scheme.
func SetGlobalBalancer(scheme string, builder selector.Builder) { mu.Lock() defer mu.Unlock() b := base.NewBalancerBuilder( scheme, &Builder{builder: builder}, base.Config{HealthCheck: true}, ) gBalancer.Register(b) }
[ "func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) {\n\tbackends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t)\n\tdefer cleanup()\n\n\taddrs := stubBackendsToResolverAddrs(backends)\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\ttarget := fmt.Sprintf(\"%s:///%s\", r.Scheme(), loadBalancedServiceName)\n\tcc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"grpc.Dial() failed: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Note the use of the deprecated `loadBalancingPolicy` field here instead\n\t// of the now recommended `loadBalancingConfig` field. The logic in the\n\t// ClientConn which decides which balancer to switch to looks at the\n\t// following places in the given order of preference:\n\t// - `loadBalancingConfig` field\n\t// - addresses of type grpclb\n\t// - `loadBalancingPolicy` field\n\t// If we use the `loadBalancingPolicy` field, the switch to \"grpclb\" later on\n\t// in the test will not happen as the ClientConn will continue to use the LB\n\t// policy received in the first update.\n\tscpr := parseServiceConfig(t, r, `{\"loadBalancingPolicy\": \"round_robin\"}`)\n\n\t// Push a resolver update with the service config specifying \"round_robin\".\n\tr.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tclient := testgrpc.NewTestServiceClient(cc)\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update with grpclb and a single balancer address\n\t// pointing to the grpclb server we created above. 
This will cause the\n\t// channel to switch to the \"grpclb\" balancer, which returns a single\n\t// backend address.\n\tgrpclbConfig := parseServiceConfig(t, r, `{\"loadBalancingPolicy\": \"grpclb\"}`)\n\tstate := resolver.State{ServiceConfig: grpclbConfig}\n\tr.UpdateState(grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: lbServer.Address()}}}))\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch back to \"round_robin\".\n\tr.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr})\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (a *Announce) SetBalancer(name string, ip net.IP) {\n\t// Call doSpam at the end of the function without holding the lock\n\tdefer a.doSpam(ip)\n\ta.Lock()\n\tdefer a.Unlock()\n\n\t// Kubernetes may inform us that we should advertise this address multiple\n\t// times, so just no-op any subsequent requests.\n\tif _, ok := a.ips[name]; ok {\n\t\treturn\n\t}\n\ta.ips[name] = ip\n\n\ta.ipRefcnt[ip.String()]++\n\tif a.ipRefcnt[ip.String()] > 1 {\n\t\t// Multiple services are using this IP, so there's nothing\n\t\t// else to do right now.\n\t\treturn\n\t}\n\n\tfor _, client := range a.ndps {\n\t\tif err := client.Watch(ip); err != nil {\n\t\t\ta.logger.Log(\"op\", \"watchMulticastGroup\", \"error\", err, \"ip\", ip, \"msg\", \"failed to watch NDP multicast group for IP, NDP responder will not respond to requests for this address\")\n\t\t}\n\t}\n}", "func (gt *GoTezos) UseBalancerStrategyRandom() {\n\tgt.balancerStrategy = \"random\"\n}", "func (p *ioThrottlerPool) SetGlobalLimit(r rate.Limit, b int) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tp.globalLimiter.SetBurst(b)\n\tp.globalLimiter.SetLimit(r)\n\tp.updateBufferSize()\n}", "func (p *PBM) SetBalancerStatus(m BalancerMode) error {\n\tvar cmd string\n\n\tswitch m {\n\tcase BalancerModeOn:\n\t\tcmd = 
\"_configsvrBalancerStart\"\n\tcase BalancerModeOff:\n\t\tcmd = \"_configsvrBalancerStop\"\n\tdefault:\n\t\treturn errors.Errorf(\"unknown mode %s\", m)\n\t}\n\n\terr := p.Conn.Database(\"admin\").RunCommand(p.ctx, bson.D{{cmd, 1}}).Err()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"run mongo command\")\n\t}\n\treturn nil\n}", "func (s) TestGRPCLB_Basic(t *testing.T) {\n\ttss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, testUserAgent, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create new load balancer: %v\", err)\n\t}\n\tdefer cleanup()\n\n\t// Push the test backend address to the remote balancer.\n\ttss.ls.sls <- &lbpb.ServerList{\n\t\tServers: []*lbpb.Server{\n\t\t\t{\n\t\t\t\tIpAddress: tss.beIPs[0],\n\t\t\t\tPort: int32(tss.bePorts[0]),\n\t\t\t\tLoadBalanceToken: lbToken,\n\t\t\t},\n\t\t},\n\t}\n\n\t// Configure the manual resolver with an initial state containing a service\n\t// config with grpclb as the load balancing policy and the remote balancer\n\t// address specified via attributes.\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\ts := &grpclbstate.State{\n\t\tBalancerAddresses: []resolver.Address{\n\t\t\t{\n\t\t\t\tAddr: tss.lbAddr,\n\t\t\t\tServerName: lbServerName,\n\t\t\t},\n\t\t},\n\t}\n\trs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s)\n\tr.InitialState(rs)\n\n\t// Connect to the test backend.\n\tdopts := []grpc.DialOption{\n\t\tgrpc.WithResolvers(r),\n\t\tgrpc.WithTransportCredentials(&serverNameCheckCreds{}),\n\t\tgrpc.WithContextDialer(fakeNameDialer),\n\t\tgrpc.WithUserAgent(testUserAgent),\n\t}\n\tcc, err := grpc.Dial(r.Scheme()+\":///\"+beServerName, dopts...)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to dial to the backend %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Make one successful RPC.\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\ttestC := 
testgrpc.NewTestServiceClient(cc)\n\tif _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil {\n\t\tt.Fatalf(\"%v.EmptyCall(_, _) = _, %v, want _, <nil>\", testC, err)\n\t}\n}", "func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) {\n\tbackends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t)\n\tdefer cleanup()\n\n\taddrs := stubBackendsToResolverAddrs(backends)\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\ttarget := fmt.Sprintf(\"%s:///%s\", r.Scheme(), loadBalancedServiceName)\n\tcc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"grpc.Dial() failed: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Push a resolver update containing no grpclb server address. This should\n\t// lead to the channel using the default LB policy which is pick_first.\n\tr.UpdateState(resolver.State{Addresses: addrs[1:]})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update with no service config and a single address pointing\n\t// to the grpclb server we created above. 
This will cause the channel to\n\t// switch to the \"grpclb\" balancer, which returns a single backend address.\n\tgrpclbConfig := parseServiceConfig(t, r, `{\"loadBalancingPolicy\": \"grpclb\"}`)\n\tstate := resolver.State{ServiceConfig: grpclbConfig}\n\tr.UpdateState(grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: lbServer.Address()}}}))\n\tclient := testgrpc.NewTestServiceClient(cc)\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update containing a non-existent grpclb server address.\n\t// This should not lead to a balancer switch.\n\tr.UpdateState(grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: \"nonExistentServer\"}}}))\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Switch to \"pick_first\" again by sending no grpclb server addresses.\n\temptyConfig := parseServiceConfig(t, r, `{}`)\n\tr.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: emptyConfig})\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s) TestBalancerSwitch_Graceful(t *testing.T) {\n\tbackends, cleanup := startBackendsForBalancerSwitch(t)\n\tdefer cleanup()\n\taddrs := stubBackendsToResolverAddrs(backends)\n\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\tcc, err := grpc.Dial(r.Scheme()+\":///test.server\", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"grpc.Dial() failed: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Push a resolver update with the service config specifying \"round_robin\".\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tr.UpdateState(resolver.State{\n\t\tAddresses: addrs[1:],\n\t\tServiceConfig: parseServiceConfig(t, r, rrServiceConfig),\n\t})\n\tclient := 
testgrpc.NewTestServiceClient(cc)\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Register a stub balancer which uses a \"pick_first\" balancer underneath and\n\t// signals on a channel when it receives ClientConn updates. But it does not\n\t// forward the ccUpdate to the underlying \"pick_first\" balancer until the test\n\t// asks it to do so. This allows us to test the graceful switch functionality.\n\t// Until the test asks the stub balancer to forward the ccUpdate, RPCs should\n\t// get routed to the old balancer. And once the test gives the go ahead, RPCs\n\t// should get routed to the new balancer.\n\tccUpdateCh := make(chan struct{})\n\twaitToProceed := make(chan struct{})\n\tstub.Register(t.Name(), stub.BalancerFuncs{\n\t\tInit: func(bd *stub.BalancerData) {\n\t\t\tpf := balancer.Get(grpc.PickFirstBalancerName)\n\t\t\tbd.Data = pf.Build(bd.ClientConn, bd.BuildOptions)\n\t\t},\n\t\tUpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error {\n\t\t\tbal := bd.Data.(balancer.Balancer)\n\t\t\tclose(ccUpdateCh)\n\t\t\tgo func() {\n\t\t\t\t<-waitToProceed\n\t\t\t\tbal.UpdateClientConnState(ccs)\n\t\t\t}()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\t// Push a resolver update with the service config specifying our stub\n\t// balancer. We should see a trace event for this balancer switch. 
But RPCs\n\t// should still be routed to the old balancer since our stub balancer does not\n\t// report a ready picker until we ask it to do so.\n\tr.UpdateState(resolver.State{\n\t\tAddresses: addrs[:1],\n\t\tServiceConfig: r.CC.ParseServiceConfig(fmt.Sprintf(`{\"loadBalancingConfig\": [{\"%v\": {}}]}`, t.Name())),\n\t})\n\tselect {\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout when waiting for a ClientConnState update on the new balancer\")\n\tcase <-ccUpdateCh:\n\t}\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Ask our stub balancer to forward the earlier received ccUpdate to the\n\t// underlying \"pick_first\" balancer which will result in a healthy picker\n\t// being reported to the channel. RPCs should start using the new balancer.\n\tclose(waitToProceed)\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) {\n\t// Unregister the grpclb balancer builder for the duration of this test.\n\tgrpclbBuilder := balancer.Get(\"grpclb\")\n\tinternal.BalancerUnregister(grpclbBuilder.Name())\n\tdefer balancer.Register(grpclbBuilder)\n\n\tbackends, cleanup := startBackendsForBalancerSwitch(t)\n\tdefer cleanup()\n\taddrs := stubBackendsToResolverAddrs(backends)\n\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\tcc, err := grpc.Dial(r.Scheme()+\":///test.server\", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"grpc.Dial() failed: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Push a resolver update which contains a bunch of stub server backends and a\n\t// grpclb server address. The latter should get the ClientConn to try and\n\t// apply the grpclb policy. But since grpclb is not registered, it should\n\t// fallback to the default LB policy which is pick_first. 
The ClientConn is\n\t// also expected to filter out the grpclb address when sending the addresses\n\t// list fo pick_first.\n\tgrpclbAddr := []resolver.Address{{Addr: \"non-existent-grpclb-server-address\"}}\n\tgrpclbConfig := parseServiceConfig(t, r, `{\"loadBalancingPolicy\": \"grpclb\"}`)\n\tstate := resolver.State{ServiceConfig: grpclbConfig, Addresses: addrs}\n\tr.UpdateState(grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: grpclbAddr}))\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update with the same addresses, but with a service config\n\t// specifying \"round_robin\". The ClientConn is expected to filter out the\n\t// grpclb address when sending the addresses list to round_robin.\n\tr.UpdateState(resolver.State{\n\t\tAddresses: addrs,\n\t\tServiceConfig: parseServiceConfig(t, r, rrServiceConfig),\n\t})\n\tclient := testgrpc.NewTestServiceClient(cc)\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) {\n\tbackends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t)\n\tdefer cleanup()\n\n\taddrs := stubBackendsToResolverAddrs(backends)\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\ttarget := fmt.Sprintf(\"%s:///%s\", r.Scheme(), loadBalancedServiceName)\n\tcc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"grpc.Dial() failed: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Push a resolver update with a GRPCLB service config and a single address\n\t// pointing to the grpclb server we created above. 
This will cause the\n\t// channel to switch to the \"grpclb\" balancer, which returns a single\n\t// backend address.\n\tgrpclbConfig := parseServiceConfig(t, r, `{\"loadBalancingPolicy\": \"grpclb\"}`)\n\tstate := resolver.State{ServiceConfig: grpclbConfig}\n\tr.UpdateState(grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: lbServer.Address()}}}))\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tclient := testgrpc.NewTestServiceClient(cc)\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[0:1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update containing a non-existent grpclb server address.\n\t// This should not lead to a balancer switch.\n\tconst nonExistentServer = \"non-existent-grpclb-server-address\"\n\tr.UpdateState(grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: nonExistentServer}}}))\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update containing no grpclb server address. This should\n\t// lead to the channel using the default LB policy which is pick_first. The\n\t// list of addresses pushed as part of this update is different from the one\n\t// returned by the \"grpclb\" balancer. 
So, we should see RPCs going to the\n\t// newly configured backends, as part of the balancer switch.\n\temptyConfig := parseServiceConfig(t, r, `{}`)\n\tr.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: emptyConfig})\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (s) TestBalancerSwitch_Basic(t *testing.T) {\n\tbackends, cleanup := startBackendsForBalancerSwitch(t)\n\tdefer cleanup()\n\taddrs := stubBackendsToResolverAddrs(backends)\n\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\tcc, err := grpc.Dial(r.Scheme()+\":///test.server\", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))\n\tif err != nil {\n\t\tt.Fatalf(\"grpc.Dial() failed: %v\", err)\n\t}\n\tdefer cc.Close()\n\n\t// Push a resolver update without an LB policy in the service config. The\n\t// channel should pick the default LB policy, which is pick_first.\n\tr.UpdateState(resolver.State{Addresses: addrs})\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update with the service config specifying \"round_robin\".\n\tr.UpdateState(resolver.State{\n\t\tAddresses: addrs,\n\t\tServiceConfig: parseServiceConfig(t, r, rrServiceConfig),\n\t})\n\tclient := testgrpc.NewTestServiceClient(cc)\n\tif err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Push a resolver update with the service config specifying \"pick_first\".\n\tr.UpdateState(resolver.State{\n\t\tAddresses: addrs,\n\t\tServiceConfig: parseServiceConfig(t, r, pickFirstServiceConfig),\n\t})\n\tif err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {\n\tccb.mu.Lock()\n\tif ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle 
{\n\t\tccb.mu.Unlock()\n\t\treturn\n\t}\n\n\tccb.mode = m\n\tdone := ccb.serializer.Done()\n\tb := ccb.balancer\n\tok := ccb.serializer.Schedule(func(_ context.Context) {\n\t\t// Close the serializer to ensure that no more calls from gRPC are sent\n\t\t// to the balancer.\n\t\tccb.serializerCancel()\n\t\t// Empty the current balancer name because we don't have a balancer\n\t\t// anymore and also so that we act on the next call to switchTo by\n\t\t// creating a new balancer specified by the new resolver.\n\t\tccb.curBalancerName = \"\"\n\t})\n\tif !ok {\n\t\tccb.mu.Unlock()\n\t\treturn\n\t}\n\tccb.mu.Unlock()\n\n\t// Give enqueued callbacks a chance to finish before closing the balancer.\n\t<-done\n\tb.Close()\n}", "func NewGRPCLBBalancer(r naming.Resolver) Balancer {\n\treturn &balancer{\n\t\tr: r,\n\t}\n}", "func setupBackendsAndFakeGRPCLB(t *testing.T) ([]*stubserver.StubServer, *fakegrpclb.Server, func()) {\n\tbackends, backendsCleanup := startBackendsForBalancerSwitch(t)\n\n\tlbServer, err := fakegrpclb.NewServer(fakegrpclb.ServerParams{\n\t\tLoadBalancedServiceName: loadBalancedServiceName,\n\t\tLoadBalancedServicePort: loadBalancedServicePort,\n\t\tBackendAddresses: []string{backends[0].Address},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create fake grpclb server: %v\", err)\n\t}\n\tgo func() {\n\t\tif err := lbServer.Serve(); err != nil {\n\t\t\tt.Errorf(\"fake grpclb Serve() failed: %v\", err)\n\t\t}\n\t}()\n\n\treturn backends, lbServer, func() {\n\t\tbackendsCleanup()\n\t\tlbServer.Stop()\n\t}\n}", "func (s) TestGRPCLB_BalancerDisconnects(t *testing.T) {\n\tvar (\n\t\ttests []*testServers\n\t\tlbs []*grpc.Server\n\t)\n\tfor i := 0; i < 2; i++ {\n\t\ttss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, \"\", nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create new load balancer: %v\", err)\n\t\t}\n\t\tdefer cleanup()\n\n\t\ttss.ls.sls <- &lbpb.ServerList{\n\t\t\tServers: []*lbpb.Server{\n\t\t\t\t{\n\t\t\t\t\tIpAddress: 
tss.beIPs[0],\n\t\t\t\t\tPort: int32(tss.bePorts[0]),\n\t\t\t\t\tLoadBalanceToken: lbToken,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\ttests = append(tests, tss)\n\t\tlbs = append(lbs, tss.lb)\n\t}\n\n\t// Configure the manual resolver with an initial state containing a service\n\t// config with grpclb as the load balancing policy and the remote balancer\n\t// addresses specified via attributes.\n\tr := manual.NewBuilderWithScheme(\"whatever\")\n\ts := &grpclbstate.State{\n\t\tBalancerAddresses: []resolver.Address{\n\t\t\t{\n\t\t\t\tAddr: tests[0].lbAddr,\n\t\t\t\tServerName: lbServerName,\n\t\t\t},\n\t\t\t{\n\t\t\t\tAddr: tests[1].lbAddr,\n\t\t\t\tServerName: lbServerName,\n\t\t\t},\n\t\t},\n\t}\n\trs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s)\n\tr.InitialState(rs)\n\n\tdopts := []grpc.DialOption{\n\t\tgrpc.WithResolvers(r),\n\t\tgrpc.WithTransportCredentials(&serverNameCheckCreds{}),\n\t\tgrpc.WithContextDialer(fakeNameDialer),\n\t}\n\tcc, err := grpc.Dial(r.Scheme()+\":///\"+beServerName, dopts...)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to dial to the backend %v\", err)\n\t}\n\tdefer cc.Close()\n\ttestC := testgrpc.NewTestServiceClient(cc)\n\n\tctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)\n\tdefer cancel()\n\tif err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tests[0].beListeners[0].Addr().String()}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Stop balancer[0], balancer[1] should be used by grpclb.\n\t// Check peer address to see if that happened.\n\tlbs[0].Stop()\n\tif err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tests[1].beListeners[0].Addr().String()}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}", "func (d *discovery) SetLoadBalancer(b LoadBalancer) {\n\td.loadBalancerLock.Lock()\n\tdefer d.loadBalancerLock.Unlock()\n\td.loadBalancer = b\n}", "func setGlobalRestConfig() error 
{\n\tvar err error\n\trestconfig, err = protoform.GetKubeClientFromOutsideCluster(kubeConfigPath, insecureSkipTLSVerify)\n\tlog.Debugf(\"rest config: %+v\", restconfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewBalancer(config *config.BalancerConfig) (*Balancer, error) {\n\tprovider, err := provider.New(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tengine, err := engine.New(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbalancer := &Balancer{\n\t\teventCh: make(chan serf.Event, 64),\n\t\tengine: engine,\n\t\tprovider: provider,\n\t\tlogger: logrus.New(),\n\t\tconfig: config,\n\t}\n\n\tif err = balancer.setupRaft(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up Raft: %v\", err)\n\t}\n\n\tif err = balancer.setupSerf(); err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up Serf: %v\", err)\n\t}\n\n\t// Flushing all VIPs on the network interface\n\tif err := fusis_net.DelVips(balancer.config.Provider.Params[\"interface\"]); err != nil {\n\t\treturn nil, fmt.Errorf(\"error cleaning up network vips: %v\", err)\n\t}\n\n\tgo balancer.watchLeaderChanges()\n\n\t// Only collect stats if some interval is defined\n\tif config.Stats.Interval > 0 {\n\t\tgo balancer.collectStats()\n\t}\n\n\treturn balancer, nil\n}", "func (ctl *Controller) SetGlobalBrightness(brightness uint8) {\n\tctl.brightness = brightness\n\n\t// Update the buffer to reflect this.\n\tfor i, clr := range ctl.ledColours {\n\t\tctl.updateBuffer(i, clr)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build creates a grpc Picker.
func (b *Builder) Build(info base.PickerBuildInfo) gBalancer.Picker { if len(info.ReadySCs) == 0 { // Block the RPC until a new picker is available via UpdateState(). return base.NewErrPicker(gBalancer.ErrNoSubConnAvailable) } nodes := make([]selector.Node, 0) for conn, info := range info.ReadySCs { ins, _ := info.Address.Attributes.Value("rawServiceInstance").(*registry.ServiceInstance) nodes = append(nodes, &grpcNode{ Node: selector.NewNode(info.Address.Addr, ins), subConn: conn, }) } p := &Picker{ selector: b.builder.Build(), } p.selector.Apply(nodes) return p }
[ "func (*nodePickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {\n\tif len(info.ReadySCs) == 0 {\n\t\treturn base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)\n\t}\n\n\tvar scs []balancer.SubConn\n\tfor sc := range info.ReadySCs {\n\t\tscs = append(scs, sc)\n\t}\n\n\treturn &nodePicker{\n\t\tsubConns: scs,\n\t}\n}", "func (b *Builder) Build() (Owner, error) {\n\tsvc, err := New(b.options...)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"service instantiation failed due to options\")\n\t}\n\n\tif err := svc.AddModules(b.modules...); err != nil {\n\t\treturn nil, errors.Wrap(err, \"service modules failed to initialize\")\n\t}\n\n\treturn svc, nil\n}", "func (gp Provider) Build(config config.Credentials) provider.Provider {\n\tclient := NewClient()\n\n\treturn &Provider{\n\t\tVerifier: provider.NewVerifierBasket(\n\t\t\tNewTeamVerifier(teamConfigsToTeam(config.Github.Teams), client),\n\t\t\tNewOrganizationVerifier(config.Github.Organizations, client),\n\t\t),\n\t}\n}", "func (r *Resolver) Build(\n\ttarget resolver.Target,\n\tcc resolver.ClientConn,\n\topts resolver.BuildOptions,\n) (\n\tresolver.Resolver,\n\terror,\n) {\n\tr.logger = zap.L().Named(\"resolver\")\n\tr.clientConn = cc\n\tvar dialOpts []grpc.DialOption\n\tif opts.DialCreds != nil {\n\t\tdialOpts = append(\n\t\t\tdialOpts,\n\t\t\tgrpc.WithTransportCredentials(opts.DialCreds),\n\t\t)\n\t}\n\tr.serviceConfig = r.clientConn.ParseServiceConfig(\n\t\tfmt.Sprintf(`{\"loadBalancingConfig\":[{\"%s\":{}}]}`, Name),\n\t)\n\tvar err error\n\tr.resolverConn, err = grpc.Dial(target.Endpoint, dialOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.ResolveNow(resolver.ResolveNowOptions{})\n\treturn r, nil\n}", "func (f *Factory) Build(providerName string, config config.Credentials) Provider {\n\tprovider, ok := providers.Load(providerName)\n\tif !ok {\n\t\tprovider, _ = providers.Load(\"basic\")\n\t}\n\n\tp := provider.(Provider)\n\treturn p.Build(config)\n}", "func (cupBuilder 
*CupBuilder) Build() *Cup {\n return cupBuilder.cup\n}", "func (b *AddOnParameterOptionBuilder) Build() (object *AddOnParameterOption, err error) {\n\tobject = new(AddOnParameterOption)\n\tobject.bitmap_ = b.bitmap_\n\tobject.name = b.name\n\tobject.value = b.value\n\treturn\n}", "func (gb *gcpBalancer) regeneratePicker() {\n\tgb.mu.RLock()\n\tdefer gb.mu.RUnlock()\n\n\tif gb.state == connectivity.TransientFailure {\n\t\tgb.picker = newErrPicker(balancer.ErrTransientFailure)\n\t\treturn\n\t}\n\treadyRefs := []*subConnRef{}\n\n\t// Select ready subConns from subConn map.\n\tfor sc, scState := range gb.scStates {\n\t\tif scState == connectivity.Ready {\n\t\t\treadyRefs = append(readyRefs, gb.scRefs[sc])\n\t\t}\n\t}\n\tgb.picker = newGCPPicker(readyRefs, gb)\n}", "func NewPicker(store MediumSourceStorer) *Picker {\n\treturn &Picker{\n\t\tstore: store,\n\t}\n}", "func (*producerBuilder) Build(cci any) (balancer.Producer, func()) {\n\tp := &producer{\n\t\tclient: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)),\n\t\tintervals: make(map[time.Duration]int),\n\t\tlisteners: make(map[OOBListener]struct{}),\n\t\tbackoff: internal.DefaultBackoffFunc,\n\t}\n\treturn p, func() {\n\t\t<-p.stopped\n\t}\n}", "func newPeerPicker(proto FetchProtocol, selfURL string, options *HashOptions) *PeerPicker {\n\tpp := &PeerPicker{\n\t\tfetchingProtocol: proto,\n\t\tselfURL: selfURL,\n\t\tfetchers: make(map[string]RemoteFetcher),\n\t}\n\tif options != nil {\n\t\tpp.opts = *options\n\t}\n\tif pp.opts.Replicas == 0 {\n\t\tpp.opts.Replicas = defaultReplicas\n\t}\n\tpp.peers = consistenthash.New(pp.opts.Replicas, pp.opts.HashFn)\n\treturn pp\n}", "func (b *GameEditionPCBuilder) Build() *PersonalComputer {\n\treturn &b.pc\n}", "func NewPickUp() Pickup {\n p := Pickup{name: \"Pickup\", vehicle: \"Pickup\", speed: 60, capacity: 2, isPrivate: true}\n return p\n}", "func (bb *gcpBalancerBuilder) Build(\n\tcc balancer.ClientConn,\n\topt balancer.BuildOptions,\n) 
balancer.Balancer {\n\treturn &gcpBalancer{\n\t\tcc: cc,\n\t\taffinityMap: make(map[string]balancer.SubConn),\n\t\tscRefs: make(map[balancer.SubConn]*subConnRef),\n\t\tscStates: make(map[balancer.SubConn]connectivity.State),\n\t\tcsEvltr: &connectivityStateEvaluator{},\n\t\t// Initialize picker to a picker that always return\n\t\t// ErrNoSubConnAvailable, because when state of a SubConn changes, we\n\t\t// may call UpdateState with this picker.\n\t\tpicker: newErrPicker(balancer.ErrNoSubConnAvailable),\n\t}\n}", "func (b *Builder) Build() *corev1.PodSpec { return b.spec }", "func NewPeerPicker(tracker *Tracker, list *memberlist.Memberlist) PeerPicker {\n\treturn PeerPicker{\n\t\ttracker: tracker,\n\t\tourName: list.LocalNode().Name,\n\t}\n}", "func (p *PopupWidget) Build() {\n\tif imgui.BeginPopup(p.name, int(p.flags)) {\n\t\tp.layout.Build()\n\t\timgui.EndPopup()\n\t}\n}", "func (opts BuilderOptions) Build(b Builder) (desc.Descriptor, error) {\n\treturn doBuild(b, opts)\n}", "func (b *HomeEditionPCBuilder) Build() *PersonalComputer {\n\treturn &b.pc\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ThresholdSpec.ProtoReflect.Descriptor instead.
func (*ThresholdSpec) Descriptor() ([]byte, []int) { return file_api_adaptive_load_metric_spec_proto_rawDescGZIP(), []int{1} }
[ "func (*MetricSpecWithThreshold) Descriptor() ([]byte, []int) {\n\treturn file_api_adaptive_load_metric_spec_proto_rawDescGZIP(), []int{2}\n}", "func (*TelemetryThreshold) Descriptor() ([]byte, []int) {\n\treturn file_huawei_telemetry_proto_rawDescGZIP(), []int{5}\n}", "func (*ThresholdIdx) Descriptor() ([]byte, []int) {\n\treturn file_storage_proto_rawDescGZIP(), []int{10}\n}", "func (*NodeThreshold) Descriptor() ([]byte, []int) {\n\treturn file_models_model_node_threshold_proto_rawDescGZIP(), []int{0}\n}", "func (*LatencyThresholdOverride) Descriptor() ([]byte, []int) {\n\treturn file_envoy_extensions_filters_network_zookeeper_proxy_v3_zookeeper_proxy_proto_rawDescGZIP(), []int{1}\n}", "func (*AlertingCondition_Spec_TimeSeries_Threshold) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_alerting_condition_proto_rawDescGZIP(), []int{0, 0, 0, 1}\n}", "func (*CircuitBreakers_Thresholds) Descriptor() ([]byte, []int) {\n\treturn file_envoy_api_v2_cluster_circuit_breaker_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}", "func (*TargetMetrics) Descriptor() ([]byte, []int) {\n\treturn file_asgt_type_target_metrics_proto_rawDescGZIP(), []int{0}\n}", "func (*LivenessProbeCF) Descriptor() ([]byte, []int) {\n\treturn file_pkg_kascfg_kascfg_proto_rawDescGZIP(), []int{13}\n}", "func (*OperationTypeThreshold) Descriptor() ([]byte, []int) {\n\treturn file_chain_proto_rawDescGZIP(), []int{7}\n}", "func (*TelemetryAssertionViolation) Descriptor() ([]byte, []int) {\n\treturn file_cl_offchainreporting_telemetry_proto_rawDescGZIP(), []int{4}\n}", "func (*WatchMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{2}\n}", "func (*GetWarnRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_punishments_punishments_proto_rawDescGZIP(), []int{35}\n}", "func (*DurationLT) Descriptor() ([]byte, []int) {\n\treturn file_buf_validate_conformance_cases_wkt_duration_proto_rawDescGZIP(), []int{5}\n}", "func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}", "func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2}\n}", "func (*MethodCallingThreshold) Descriptor() ([]byte, []int) {\n\treturn file_client_proto_rawDescGZIP(), []int{17}\n}", "func (*TargetMetrics_Metric) Descriptor() ([]byte, []int) {\n\treturn file_asgt_type_target_metrics_proto_rawDescGZIP(), []int{0, 0}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use MetricSpecWithThreshold.ProtoReflect.Descriptor instead.
func (*MetricSpecWithThreshold) Descriptor() ([]byte, []int) { return file_api_adaptive_load_metric_spec_proto_rawDescGZIP(), []int{2} }
[ "func (*ThresholdSpec) Descriptor() ([]byte, []int) {\n\treturn file_api_adaptive_load_metric_spec_proto_rawDescGZIP(), []int{1}\n}", "func (*TelemetryThreshold) Descriptor() ([]byte, []int) {\n\treturn file_huawei_telemetry_proto_rawDescGZIP(), []int{5}\n}", "func (*WatchMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{2}\n}", "func (*LatencyThresholdOverride) Descriptor() ([]byte, []int) {\n\treturn file_envoy_extensions_filters_network_zookeeper_proxy_v3_zookeeper_proxy_proto_rawDescGZIP(), []int{1}\n}", "func (*AlertingCondition_Spec_TimeSeries_Threshold) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_alerting_condition_proto_rawDescGZIP(), []int{0, 0, 0, 1}\n}", "func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2}\n}", "func (*WatchMetricDescriptorsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{4}\n}", "func (*TargetMetrics_Metric) Descriptor() ([]byte, []int) {\n\treturn file_asgt_type_target_metrics_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*TargetMetrics) Descriptor() ([]byte, []int) {\n\treturn file_asgt_type_target_metrics_proto_rawDescGZIP(), []int{0}\n}", "func (*KafkaMeter) Descriptor() ([]byte, []int) {\n\treturn file_pkg_sinks_plugin_proto_metrics_proto_rawDescGZIP(), []int{0}\n}", "func (*ThresholdIdx) Descriptor() ([]byte, []int) {\n\treturn file_storage_proto_rawDescGZIP(), []int{10}\n}", "func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6}\n}", "func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5}\n}", "func (*CircuitBreakers_Thresholds) Descriptor() 
([]byte, []int) {\n\treturn file_envoy_api_v2_cluster_circuit_breaker_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*FeaturestoreMonitoringConfig_ThresholdConfig) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_featurestore_monitoring_proto_rawDescGZIP(), []int{0, 2}\n}", "func (*Metrics) Descriptor() ([]byte, []int) {\n\treturn file_api_mesh_v1alpha1_metrics_proto_rawDescGZIP(), []int{0}\n}", "func (*NodeThreshold) Descriptor() ([]byte, []int) {\n\treturn file_models_model_node_threshold_proto_rawDescGZIP(), []int{0}\n}", "func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}", "func (*OnTargetAudienceMetrics) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{19}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use Scorecard.ProtoReflect.Descriptor instead.
func (*Scorecard) Descriptor() ([]byte, []int) { return file_google_monitoring_dashboard_v1_scorecard_proto_rawDescGZIP(), []int{0} }
[ "func (*Score) Descriptor() ([]byte, []int) {\n\treturn file_mitre_cvss_v3_cvss_proto_rawDescGZIP(), []int{2}\n}", "func (*BaseScore) Descriptor() ([]byte, []int) {\n\treturn file_mitre_cvss_v3_cvss_proto_rawDescGZIP(), []int{3}\n}", "func (*CMsgPlayerCard) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_common_proto_rawDescGZIP(), []int{38}\n}", "func (*BaseScoreInfo) Descriptor() ([]byte, []int) {\n\treturn file_proto_score_score_proto_rawDescGZIP(), []int{0}\n}", "func (*Score) Descriptor() ([]byte, []int) {\n\treturn file_leaderboard_enriching_proto_webhook_v1_podium_webhook_proto_rawDescGZIP(), []int{3}\n}", "func (*CMsgPlayerConductScorecard) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{168}\n}", "func (*UpdateScoreRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_api_proto_rawDescGZIP(), []int{3}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*Decl) Descriptor() ([]byte, []int) {\n\treturn file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}\n}", "func (StandardPTransforms_DeprecatedPrimitives) EnumDescriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{4, 1}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*SafetyFeedback) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_safety_proto_rawDescGZIP(), []int{1}\n}", "func (*CMsgPlayerConductScorecardRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{167}\n}", "func (*Modifier) Descriptor() ([]byte, []int) {\n\treturn file_FillerGame_proto_rawDescGZIP(), []int{6}\n}", "func (*TestProto) Descriptor() ([]byte, []int) {\n\treturn 
file_proto_clarifai_api_utils_test_proto_proto_rawDescGZIP(), []int{0}\n}", "func (*CodeSystemDeprecated) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_extensions_proto_rawDescGZIP(), []int{38}\n}", "func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}", "func (*ModifyClubInformRequest) Descriptor() ([]byte, []int) {\n\treturn file_club_leader_proto_rawDescGZIP(), []int{6}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use Scorecard_GaugeView.ProtoReflect.Descriptor instead.
func (*Scorecard_GaugeView) Descriptor() ([]byte, []int) { return file_google_monitoring_dashboard_v1_scorecard_proto_rawDescGZIP(), []int{0, 0} }
[ "func (*KafkaGauge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_sinks_plugin_proto_metrics_proto_rawDescGZIP(), []int{1}\n}", "func (*CLRMetric) Descriptor() ([]byte, []int) {\n\treturn file_language_agent_CLRMetric_proto_rawDescGZIP(), []int{1}\n}", "func (*Metrics) Descriptor() ([]byte, []int) {\n\treturn file_api_mesh_v1alpha1_metrics_proto_rawDescGZIP(), []int{0}\n}", "func (*UpdateMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{6}\n}", "func (*WatchMetricDescriptorsResponse_PageTokenChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{5, 0}\n}", "func (*CLRMetricCollection) Descriptor() ([]byte, []int) {\n\treturn file_language_agent_CLRMetric_proto_rawDescGZIP(), []int{0}\n}", "func (*CLRMetricCollection) Descriptor() ([]byte, []int) {\n\treturn file_language_agent_v2_CLRMetric_proto_rawDescGZIP(), []int{0}\n}", "func (*Metrics) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{0}\n}", "func (*CMsgClientToGCPlayerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{143}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*WatchMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{2}\n}", "func (*Metric) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{2}\n}", "func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}", "func (*TargetMetrics_Metric) Descriptor() ([]byte, []int) {\n\treturn file_asgt_type_target_metrics_proto_rawDescGZIP(), []int{0, 0}\n}", "func (m 
*megasquirt_gp18) Descriptor() *descriptor.Message {\n\treturn Messages().megasquirt_gp18.Message\n}", "func (*OnTargetAudienceMetrics) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{19}\n}", "func (*UpdateMetricDescriptorRequest_CAS) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{6, 0}\n}", "func (*MetricWrapper) Descriptor() ([]byte, []int) {\n\treturn file_zenoss_cloud_data_receiver_proto_rawDescGZIP(), []int{14}\n}", "func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use Scorecard_SparkChartView.ProtoReflect.Descriptor instead.
func (*Scorecard_SparkChartView) Descriptor() ([]byte, []int) { return file_google_monitoring_dashboard_v1_scorecard_proto_rawDescGZIP(), []int{0, 1} }
[ "func (*ChartView) Descriptor() ([]byte, []int) {\n\treturn file_view_proto_rawDescGZIP(), []int{0}\n}", "func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}", "func (*CodeSystemDeprecated) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_extensions_proto_rawDescGZIP(), []int{38}\n}", "func (*Scorecard_GaugeView) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_dashboard_v1_scorecard_proto_rawDescGZIP(), []int{0, 0}\n}", "func (*Chart) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{20}\n}", "func (*ChartInfo) Descriptor() ([]byte, []int) {\n\treturn file_helm_api_proto_rawDescGZIP(), []int{24}\n}", "func (*ChartOptions) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_dashboard_v1_xychart_proto_rawDescGZIP(), []int{1}\n}", "func (*Model_ClusteringMetrics) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_bigquery_v2_model_proto_rawDescGZIP(), []int{0, 6}\n}", "func (*PostModelVersionMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{93}\n}", "func (*ListTrendingMetricsViewsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{172}\n}", "func (*GetModelVersionMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{94}\n}", "func (*XyChart) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_dashboard_v1_xychart_proto_rawDescGZIP(), []int{0}\n}", "func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_time_serie_custom_proto_rawDescGZIP(), []int{0}\n}", "func (*OnTargetAudienceMetrics) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v2_services_reach_plan_service_proto_rawDescGZIP(), []int{19}\n}", "func (*ReadTensorboardUsageResponse) 
Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{7}\n}", "func (*Model_ClusteringMetrics_Cluster) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_bigquery_v2_model_proto_rawDescGZIP(), []int{0, 6, 0}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*ReadTensorboardTimeSeriesDataResponse) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_aiplatform_v1_tensorboard_service_proto_rawDescGZIP(), []int{37}\n}", "func (*Scorecard) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_dashboard_v1_scorecard_proto_rawDescGZIP(), []int{0}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration.
func (in *AESConfiguration) DeepCopy() *AESConfiguration { if in == nil { return nil } out := new(AESConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptionConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *EncryptionConfig) DeepCopy() *EncryptionConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptionConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *CryptoConfig) DeepCopy() *CryptoConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CryptoConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SecretEngineConfiguration) DeepCopy() *SecretEngineConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretEngineConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Encryption) DeepCopy() *Encryption {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Encryption)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AutoMLSecurityConfig) DeepCopy() *AutoMLSecurityConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AutoMLSecurityConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewAES(key []byte) *AES {\n\tvar err error\n\tae := &AES{}\n\tae.keyinitial(key)\n\tae.ivinitial(ae.key)\n\tae.block, err = aes.NewCipher(ae.key)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"aes.NewCipher failed: %s\", err.Error()))\n\t}\n\tae.blockEncrypt = cipher.NewCBCEncrypter(ae.block, ae.iv)\n\tae.blockDecrypt = cipher.NewCBCDecrypter(ae.block, ae.iv)\n\treturn ae\n}", "func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretboxConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewAES(aesKey, hmacKey []byte) (*AESCTREncryptor, error) {\n\tif len(hmacKey) == 0 {\n\t\treturn nil, ErrHmacKeyTooShort\n\t}\n\n\tblock, err := aes.NewCipher(aesKey)\n\tif err != nil {\n\t\treturn nil, ErrKeyTooShort\n\t}\n\n\treturn &AESCTREncryptor{\n\t\taesKey: aesKey,\n\t\thmacKey: hmacKey,\n\t\tblock: block,\n\t}, nil\n}", "func (in 
*AWSConfiguration) DeepCopy() *AWSConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AWSConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewAESCrypto(key []byte, iv ...byte) (*AESCrypto, error) {\n\tb, err := aes.NewCipher(key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &AESCrypto{\n\t\tblock: b,\n\t\tkey: key,\n\t\tiv: iv,\n\t}\n\n\tif len(iv) == 0 {\n\t\tr.iv = key[:b.BlockSize()]\n\t}\n\n\treturn r, nil\n}", "func (in *ActiveDirectoryConfig) DeepCopy() *ActiveDirectoryConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ActiveDirectoryConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *DataEncryption) DeepCopy() *DataEncryption {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DataEncryption)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *EKSCFConfiguration) DeepCopy() *EKSCFConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EKSCFConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AcceleratorConfig) DeepCopy() *AcceleratorConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AcceleratorConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *EncryptionProperties) DeepCopy() *EncryptionProperties {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptionProperties)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AzureConfiguration) DeepCopy() *AzureConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AzureConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewAES(\n\tmode modeType,\n\tkey string,\n\tpassword string,\n\tencoding encodingType,\n) (AES, error) {\n\n\tif mode == \"\" {\n\t\tmode = ModeGCM\n\t}\n\tif mode == ModeGCM && password == \"\" {\n\t\treturn nil, errors.New(\"password is required in gcm mode\")\n\t}\n\tif mode != ModeGCM && mode != ModeCBC {\n\t\treturn nil, errors.New(\"only support gcm and cbc mode\")\n\t}\n\n\tif key == \"\" {\n\t\tkey = DefaultKey\n\t}\n\tkeyBuf, err := 
base64.StdEncoding.DecodeString(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkeySize := len(keyBuf) * 8\n\tif keySize != 256 {\n\t\tif mode == ModeCBC {\n\t\t\treturn nil, errors.New(\"key requires a 256-bit base64 encoded string with cbc mode\")\n\t\t}\n\t\tif keySize != 128 && keySize != 192 {\n\t\t\treturn nil, errors.New(\"key requires a 128-bit, 192-bit or 256-bit base64 encoded string\")\n\t\t}\n\t}\n\n\tif encoding == \"\" {\n\t\tencoding = Base64\n\t}\n\n\tif mode == ModeGCM {\n\t\treturn &GCM{\n\t\t\tKey: keyBuf,\n\t\t\tPassword: password,\n\t\t\tVersion: []byte{0x01, 0x03},\n\t\t\tEncoding: encoding,\n\t\t}, nil\n\t}\n\treturn &CBC{\n\t\tKey: keyBuf,\n\t\tVersion: []byte{0x01, 0x04},\n\t\tEncoding: encoding,\n\t}, nil\n}", "func (in *ApplicationConfiguration) DeepCopy() *ApplicationConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ApplicationConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration.
func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { if in == nil { return nil } out := new(EncryptionConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *EncryptionConfig) DeepCopy() *EncryptionConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptionConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Encryption) DeepCopy() *Encryption {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Encryption)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *DataLakeResource) SetEncryptionConfiguration(v *DataLakeEncryptionConfiguration) *DataLakeResource {\n\ts.EncryptionConfiguration = v\n\treturn s\n}", "func (in *EncryptionProperties) DeepCopy() *EncryptionProperties {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptionProperties)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *DataLakeConfiguration) SetEncryptionConfiguration(v *DataLakeEncryptionConfiguration) *DataLakeConfiguration {\n\ts.EncryptionConfiguration = v\n\treturn s\n}", "func (in *AESConfiguration) DeepCopy() *AESConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AESConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *S3ExportConfiguration) SetEncryptionConfiguration(v *S3EncryptionConfiguration) *S3ExportConfiguration {\n\ts.EncryptionConfiguration = v\n\treturn s\n}", "func (in *DataEncryption) DeepCopy() *DataEncryption {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DataEncryption)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *CryptoConfig) DeepCopy() *CryptoConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CryptoConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *DatabaseEncryption) DeepCopy() *DatabaseEncryption {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DatabaseEncryption)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *KinesisVideoStreamConfig) SetEncryptionConfig(v *EncryptionConfig) *KinesisVideoStreamConfig {\n\ts.EncryptionConfig = v\n\treturn s\n}", "func (in *EncryptionWithCmk) DeepCopy() *EncryptionWithCmk {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := 
new(EncryptionWithCmk)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *S3Config) SetEncryptionConfig(v *EncryptionConfig) *S3Config {\n\ts.EncryptionConfig = v\n\treturn s\n}", "func (o DatabaseOutput) EncryptionConfig() DatabaseEncryptionConfigPtrOutput {\n\treturn o.ApplyT(func(v *Database) DatabaseEncryptionConfigPtrOutput { return v.EncryptionConfig }).(DatabaseEncryptionConfigPtrOutput)\n}", "func (in *EncryptKey) DeepCopy() *EncryptKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SecretEngineConfiguration) DeepCopy() *SecretEngineConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretEngineConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *GetEncryptionConfigOutput) SetEncryptionConfig(v *EncryptionConfig) *GetEncryptionConfigOutput {\n\ts.EncryptionConfig = v\n\treturn s\n}", "func (in *NetworkConfiguration) DeepCopy() *NetworkConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NetworkConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *PutEncryptionConfigOutput) SetEncryptionConfig(v *EncryptionConfig) *PutEncryptionConfigOutput {\n\ts.EncryptionConfig = v\n\treturn s\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration.
func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { if in == nil { return nil } out := new(IdentityConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *WorkloadIdentityConfig) DeepCopy() *WorkloadIdentityConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadIdentityConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Identity) DeepCopy() *Identity {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Identity)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IdentityParameters) DeepCopy() *IdentityParameters {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IdentityParameters)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ContrailCNIConfiguration) DeepCopy() *ContrailCNIConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ContrailCNIConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *OIDCConfig) DeepCopy() *OIDCConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(OIDCConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IPAMConfiguration) DeepCopy() *IPAMConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IPAMConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IsoConfiguration) DeepCopy() *IsoConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IsoConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IpConfiguration) DeepCopy() *IpConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IpConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IdentityProvider) DeepCopy() *IdentityProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IdentityProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IstioConfig) DeepCopy() *IstioConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IstioConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeyCloakOIDCConfig) DeepCopy() *KeyCloakOIDCConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyCloakOIDCConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *PKIConfig) DeepCopy() *PKIConfig {\n\tif in == nil {\n\t\treturn 
nil\n\t}\n\tout := new(PKIConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IPIPConfiguration) DeepCopy() *IPIPConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IPIPConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *OCIConfiguration) DeepCopy() *OCIConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(OCIConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IdentityProviderParameters) DeepCopy() *IdentityProviderParameters {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IdentityProviderParameters)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (s *DatastoreProperties) SetIdentityProviderConfiguration(v *IdentityProviderConfiguration) *DatastoreProperties {\n\ts.IdentityProviderConfiguration = v\n\treturn s\n}", "func (in *LabelIdentity) DeepCopy() *LabelIdentity {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(LabelIdentity)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (c *Provider) IdentityConfig() msp.IdentityConfig {\n\treturn c.identityConfig\n}", "func (in *IPConfig) DeepCopy() *IPConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IPConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration.
func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { if in == nil { return nil } out := new(KMSConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *KalmConfig) DeepCopy() *KalmConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KalmConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubemanagerConfiguration) DeepCopy() *KubemanagerConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubemanagerConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubemanagerServiceConfiguration) DeepCopy() *KubemanagerServiceConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubemanagerServiceConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubemanagerConfig) DeepCopy() *KubemanagerConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubemanagerConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KmsKeyParameters) DeepCopy() *KmsKeyParameters {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KmsKeyParameters)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *EKSManagedConfiguration) DeepCopy() *EKSManagedConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EKSManagedConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubemanagerManagerServiceConfiguration) DeepCopy() *KubemanagerManagerServiceConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubemanagerManagerServiceConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubeadmConfig) DeepCopy() *KubeadmConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubeadmConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KopsConfig) DeepCopy() *KopsConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KopsConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *EKSCFConfiguration) DeepCopy() *EKSCFConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EKSCFConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *K3sConfig) DeepCopy() *K3sConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := 
new(K3sConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KVConfiguration) DeepCopy() *KVConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KVConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubemanagerNodesConfiguration) DeepCopy() *KubemanagerNodesConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubemanagerNodesConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KmsKeySpec) DeepCopy() *KmsKeySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KmsKeySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubebenchConfig) DeepCopy() *KubebenchConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubebenchConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubeadmConfigList) DeepCopy() *KubeadmConfigList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubeadmConfigList)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KmsKeyList) DeepCopy() *KmsKeyList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KmsKeyList)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KubeadmConfigSpec) DeepCopy() *KubeadmConfigSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KubeadmConfigSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SmsConfigurationType) DeepCopy() *SmsConfigurationType {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SmsConfigurationType)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key.
func (in *Key) DeepCopy() *Key { if in == nil { return nil } out := new(Key) in.DeepCopyInto(out) return out }
[ "func (in *KeyRef) DeepCopy() *KeyRef {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyRef)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeyReference) DeepCopy() *KeyReference {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyReference)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *CertKey) DeepCopy() *CertKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CertKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KmsKey) DeepCopy() *KmsKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KmsKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeyMetadata) DeepCopy() *KeyMetadata {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyMetadata)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SignerKey) DeepCopy() *SignerKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SignerKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *TrustKey) DeepCopy() *TrustKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TrustKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *NameKey) DeepCopy() *NameKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(NameKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SignKey) DeepCopy() *SignKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SignKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeyParameters) DeepCopy() *KeyParameters {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyParameters)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KMSKey) DeepCopy() *KMSKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KMSKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeyList) DeepCopy() *KeyList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyList)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeySpec) DeepCopy() *KeySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewKey(kdf kdf.HKDF, key []byte) 
*Key {\n\trootKey := Key{\n\t\tkdf: kdf,\n\t\tkey: key,\n\t}\n\n\treturn &rootKey\n}", "func (in *MembershipMemberKey) DeepCopy() *MembershipMemberKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MembershipMemberKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ServiceKey) DeepCopy() *ServiceKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ServiceKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *LicenseKey) DeepCopy() *LicenseKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(LicenseKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProjectKey) DeepCopy() *ProjectKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProjectKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *EncryptKey) DeepCopy() *EncryptKey {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EncryptKey)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration.
func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { if in == nil { return nil } out := new(ProviderConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *CloudflareProviderConfig) DeepCopy() *CloudflareProviderConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CloudflareProviderConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Route53ProviderConfig) DeepCopy() *Route53ProviderConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Route53ProviderConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Provider) DeepCopy() *Provider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Provider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProviderSettings) DeepCopy() *ProviderSettings {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProviderSettings)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProviderDescription) DeepCopy() *ProviderDescription {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProviderDescription)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AlidnsProviderConfig) DeepCopy() *AlidnsProviderConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AlidnsProviderConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VsphereClusterProviderConfig) DeepCopy() *VsphereClusterProviderConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VsphereClusterProviderConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AzureKeyVaultProvider) DeepCopy() *AzureKeyVaultProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AzureKeyVaultProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProviderRef) DeepCopy() *ProviderRef {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProviderRef)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProviderPair) DeepCopy() *ProviderPair {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProviderPair)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProviderSettingsSpec) DeepCopy() *ProviderSettingsSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProviderSettingsSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in 
*SamlProvider) DeepCopy() *SamlProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SamlProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VsphereMachineProviderConfig) DeepCopy() *VsphereMachineProviderConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VsphereMachineProviderConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *OpenLdapProvider) DeepCopy() *OpenLdapProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(OpenLdapProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AzureConfigStatusProvider) DeepCopy() *AzureConfigStatusProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AzureConfigStatusProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AuthorizationPolicy_ExtensionProvider) DeepCopy() *AuthorizationPolicy_ExtensionProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AuthorizationPolicy_ExtensionProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ProviderSpec) DeepCopy() *ProviderSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProviderSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *KeyCloakProvider) DeepCopy() *KeyCloakProvider {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KeyCloakProvider)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func ProviderCfg(cfg config.Configurator) (*Config, func(), error) {\n\tc := &Config{\n\t\tinvoker: invoker.NewInvoker(),\n\t}\n\te := cfg.UnmarshalKeyOnReload(UnmarshalKey, c)\n\treturn c, func() {}, e\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration.
func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { if in == nil { return nil } out := new(ResourceConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *ResourceConfig) DeepCopy() *ResourceConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceLogConfiguration) DeepCopy() *ResourceLogConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceLogConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *BaseKubernetesResourceConfig) DeepCopy() *BaseKubernetesResourceConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(BaseKubernetesResourceConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceOptions) DeepCopy() *ResourceOptions {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceOptions)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *IpConfiguration_ARM) DeepCopy() *IpConfiguration_ARM {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(IpConfiguration_ARM)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceLogConfiguration_ARM) DeepCopy() *ResourceLogConfiguration_ARM {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceLogConfiguration_ARM)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *LabelingJobResourceConfig) DeepCopy() *LabelingJobResourceConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(LabelingJobResourceConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceInclusionConfig) DeepCopy() *ResourceInclusionConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceInclusionConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceDescription) DeepCopy() *ResourceDescription {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceDescription)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *RegistryConfiguration) DeepCopy() *RegistryConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RegistryConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Resource) DeepCopy() *Resource {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout 
:= new(Resource)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceOutput) DeepCopy() *ResourceOutput {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceOutput)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceCondition) DeepCopy() *ResourceCondition {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceCondition)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceFile) DeepCopy() *ResourceFile {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceFile)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ResourceAttribute) DeepCopy() *ResourceAttribute {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ResourceAttribute)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ApplicationGatewaySubResource) DeepCopy() *ApplicationGatewaySubResource {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ApplicationGatewaySubResource)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *RedisConfiguration) DeepCopy() *RedisConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RedisConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *PrivateEndpointIPConfiguration_ARM) DeepCopy() *PrivateEndpointIPConfiguration_ARM {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PrivateEndpointIPConfiguration_ARM)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ApplicationGatewaySubResource_ARM) DeepCopy() *ApplicationGatewaySubResource_ARM {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ApplicationGatewaySubResource_ARM)\n\tin.DeepCopyInto(out)\n\treturn out\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration.
func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { if in == nil { return nil } out := new(SecretboxConfiguration) in.DeepCopyInto(out) return out }
[ "func (in *AppCatalogSpecConfigSecret) DeepCopy() *AppCatalogSpecConfigSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AppCatalogSpecConfigSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ChartSpecConfigSecret) DeepCopy() *ChartSpecConfigSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ChartSpecConfigSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SecretEngineConfiguration) DeepCopy() *SecretEngineConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretEngineConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AppSpecConfigSecret) DeepCopy() *AppSpecConfigSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AppSpecConfigSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ConfigMapSecret) DeepCopy() *ConfigMapSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ConfigMapSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *ImagePullSecretConfig) DeepCopy() *ImagePullSecretConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ImagePullSecretConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *DatabaseSecretEngineConfig) DeepCopy() *DatabaseSecretEngineConfig {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DatabaseSecretEngineConfig)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *VaultSecret) DeepCopy() *VaultSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(VaultSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Secret) DeepCopy() *Secret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Secret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SecretAccessRequestConfiguration) DeepCopy() *SecretAccessRequestConfiguration {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretAccessRequestConfiguration)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *AppSpecKubeConfigSecret) DeepCopy() *AppSpecKubeConfigSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := 
new(AppSpecKubeConfigSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *BcsSecret) DeepCopy() *BcsSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(BcsSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *DeviceSecretVerifierConfigType) DeepCopy() *DeviceSecretVerifierConfigType {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DeviceSecretVerifierConfigType)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SecretConfigmap) DeepCopy() *SecretConfigmap {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretConfigmap)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *DatabaseSecretEngineConfigSpec) DeepCopy() *DatabaseSecretEngineConfigSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DatabaseSecretEngineConfigSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *CustomSecret) DeepCopy() *CustomSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CustomSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *CredentialSecret) DeepCopy() *CredentialSecret {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CredentialSecret)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (b Bytes) SecretBox(n SecretBoxNonce, k SecretBoxKey) (c Bytes) {\n\tcheckTypedSize(&n, \"nonce\")\n\tcheckTypedSize(&k, \"secret key\")\n\n\tbp, bl := plen(b)\n\tc = make([]byte, bl+cryptoSecretBoxMacBytes)\n\tif int(C.crypto_secretbox_easy(\n\t\t(*C.uchar)(&c[0]),\n\t\t(*C.uchar)(bp),\n\t\t(C.ulonglong)(bl),\n\t\t(*C.uchar)(&n.Bytes[0]),\n\t\t(*C.uchar)(&k.Bytes[0]))) != 0 {\n\t\tpanic(\"see libsodium\")\n\t}\n\n\treturn\n}", "func (proxy *StandAloneProxyConfig) DeepCopy() *StandAloneProxyConfig {\n\tif proxy == nil {\n\t\treturn nil\n\t}\n\tcloned := new(StandAloneProxyConfig)\n\tcloned.proxyCredentials = make(map[string]*ProxyUser)\n\tcloned.managementServers = make(map[url.URL]*ManagementServer)\n\tcloned.managedArrays = make(map[string]*StorageArray)\n\tfor key, value := range proxy.managedArrays {\n\t\tarray := 
*value\n\t\tcloned.managedArrays[key] = &array\n\t}\n\tfor key, value := range proxy.managementServers {\n\t\tcloned.managementServers[key] = value.DeepCopy()\n\t}\n\tfor key, value := range proxy.proxyCredentials {\n\t\tcreds := *value\n\t\tcloned.proxyCredentials[key] = &creds\n\t}\n\treturn cloned\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsOk check response code is equal to 200
func (r *Response) IsOk() bool { return r.Code == ok }
[ "func (resp *Response) OK() bool {\n\treturn resp.StatusCode < 400\n}", "func (c *Response) IsOK()bool {\n\treturn c.resp.Status == \"200\"\n}", "func (w *responseWrapper) IsOK() bool {\n\treturn w.status == 200\n}", "func (resp *Response) StatusOk() bool {\n\treturn resp.statusCode == http.StatusOK\n}", "func (s *APIStatusResponse) OK() bool {\n\treturn s.StatusCode == \"ok\"\n}", "func (r *ParsedResponse) IsOK() bool {\n\treturn r.Code == codeOK\n}", "func (_this *Response) Ok() bool {\n\tvar ret bool\n\tvalue := _this.Value_JS.Get(\"ok\")\n\tret = (value).Bool()\n\treturn ret\n}", "func isOK(statusCode int) bool {\n\treturn statusCode < minHTTPErrorStatusCode\n}", "func (r *AliasResponse) IsOk() bool {\n\treturn r.redirectURL.Query().Get(\"status\") == \"0\"\n}", "func statusOK(code int) bool {\n\treturn 200 <= code && code < 300\n}", "func (cr *ClientResponse) Ok() bool {\n\treturn cr.ok\n}", "func (c *Client) IsOK() bool {\n\turl := fmt.Sprintf(\"%s/v1/sys/health\", c.addr)\n\n\tr, _ := http.NewRequest(http.MethodGet, url, nil)\n\t//r.Header.Add(\"X-Vault-Token\", \"root\")\n\n\tresp, err := http.DefaultClient.Do(r)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (s statusCode) Successful() bool { return s >= 200 && s < 300 }", "func isSuccess(code int) bool {\n\treturn code == http.StatusOK ||\n\t\tcode == http.StatusCreated ||\n\t\tcode == http.StatusAccepted\n}", "func (r ParsedResponses) IsOK() bool {\n\treturn r.Len() > 0 && r[0].IsOK()\n}", "func (er *ExitResponse) IsOk() bool {\n\treturn er.Ok\n}", "func (o *WebSvmGetOK) IsCode(code int) bool {\n\treturn code == 200\n}", "func IsSuccessful(response *http.Response, err error) bool {\n\treturn err == nil && response != nil && response.StatusCode < 400\n}", "func (o *PlexGetOK) IsCode(code int) bool {\n\treturn code == 200\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use SyncLocationReq.ProtoReflect.Descriptor instead.
func (*SyncLocationReq) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{0} }
[ "func (*UpdateVehicleLocationRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_maps_fleetengine_v1_vehicle_api_proto_rawDescGZIP(), []int{3}\n}", "func (*UpdateLocationRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{2}\n}", "func (*SyncLocationRsp) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{1}\n}", "func (*GetLocationRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{3}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_contact_proto_rawDescGZIP(), []int{12}\n}", "func (*LocationUpdateResponse) Descriptor() ([]byte, []int) {\n\treturn file_location_proto_rawDescGZIP(), []int{2}\n}", "func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}", "func (*UpdateCallRequest) Descriptor() ([]byte, []int) {\n\treturn file_calling_message_proto_rawDescGZIP(), []int{18}\n}", "func (*UpdateDomainMappingRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_appengine_v1_appengine_proto_rawDescGZIP(), []int{40}\n}", "func (*GetRelatedLocationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_place_proto_rawDescGZIP(), []int{6}\n}", "func (*UpdateRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_grpc_proto_rawDescGZIP(), []int{8}\n}", "func (*SyncRequest) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{12}\n}", "func (*WarnRequest) Descriptor() ([]byte, []int) {\n\treturn file_punishments_punishments_proto_rawDescGZIP(), []int{12}\n}", "func 
(*UpdateTelemetryReportedRequest) Descriptor() ([]byte, []int) {\n\treturn file_external_applications_applications_proto_rawDescGZIP(), []int{29}\n}", "func (*PatchAnnotationsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{4}\n}", "func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}", "func (*MemberReceiveAddressUpdateReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{86}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use SyncLocationRsp.ProtoReflect.Descriptor instead.
func (*SyncLocationRsp) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{1} }
[ "func (*SyncLocationReq) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{0}\n}", "func (*TransGrpcRsp) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{8}\n}", "func (*LocationUpdateResponse) Descriptor() ([]byte, []int) {\n\treturn file_location_proto_rawDescGZIP(), []int{2}\n}", "func (*UpdateFriendStatusRsp) Descriptor() ([]byte, []int) {\n\treturn file_v1_friend_friend_proto_rawDescGZIP(), []int{3}\n}", "func (*UpdateVehicleLocationRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_maps_fleetengine_v1_vehicle_api_proto_rawDescGZIP(), []int{3}\n}", "func (*RefreshResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{17}\n}", "func (DeliveryVehicleLocationSensor) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_maps_fleetengine_delivery_v1_common_proto_rawDescGZIP(), []int{0}\n}", "func (*GetRsp) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{1}\n}", "func (*RefreshRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{16}\n}", "func (*SyncOfflineSysMsgsEventRsp) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_msg_SyncOfflineSysMsgsEvent_proto_rawDescGZIP(), []int{0}\n}", "func (*ClusterRsp) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{8}\n}", "func (*LocationTypeEnum) Descriptor() ([]byte, []int) {\n\treturn file_registry_RegistryService_proto_rawDescGZIP(), []int{28}\n}", "func (*GroupRsp) Descriptor() ([]byte, []int) {\n\treturn file_chatMsg_msg_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateFriendRsp) Descriptor() ([]byte, []int) {\n\treturn file_v1_friend_friend_proto_rawDescGZIP(), []int{1}\n}", "func (*LeaveTeamRsp) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_team_LeaveTeam_proto_rawDescGZIP(), []int{1}\n}", "func 
(*MemberReceiveAddressUpdateResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{87}\n}", "func (*GetLocationResponse) Descriptor() ([]byte, []int) {\n\treturn file_api_protobuf_spec_connection_user_v1_proto_rawDescGZIP(), []int{4}\n}", "func (Diagnostic_Kind) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_api_servicemanagement_v1_resources_proto_rawDescGZIP(), []int{2, 0}\n}", "func (*DeliveryVehicleLocation) Descriptor() ([]byte, []int) {\n\treturn file_google_maps_fleetengine_delivery_v1_common_proto_rawDescGZIP(), []int{1}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use RemoveKeeperReq.ProtoReflect.Descriptor instead.
func (*RemoveKeeperReq) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{2} }
[ "func (*RemoveMemberReq) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{11}\n}", "func (*ExternalIDPRemoveRequest) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{162}\n}", "func (*RemoveRequest) Descriptor() ([]byte, []int) {\n\treturn file_natan_proto_rawDescGZIP(), []int{7}\n}", "func (*RemoveCheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_ocp_check_api_ocp_check_api_proto_rawDescGZIP(), []int{10}\n}", "func (*RemoveMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_gogoatalk_proto_rawDescGZIP(), []int{9}\n}", "func (*RemoveRequest) Descriptor() ([]byte, []int) {\n\treturn file_proxima_proto_rawDescGZIP(), []int{19}\n}", "func (*RemoveRuleRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_api_proto_rawDescGZIP(), []int{6}\n}", "func (*InternalVodRemoveRecordsRequest) Descriptor() ([]byte, []int) {\n\treturn file_code_justin_tv_vod_vodapi_rpc_vodapi_requests_proto_rawDescGZIP(), []int{34}\n}", "func (*RemoveTokenRequest) Descriptor() ([]byte, []int) {\n\treturn file_v1_notification_proto_rawDescGZIP(), []int{1}\n}", "func (*CPlayer_RemoveFriend_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_player_steamworkssdk_proto_rawDescGZIP(), []int{20}\n}", "func (*ContainerRemoveRequest) Descriptor() ([]byte, []int) {\n\treturn file_gRpcServer_proto_rawDescGZIP(), []int{46}\n}", "func (*NetworkRemoveRequest) Descriptor() ([]byte, []int) {\n\treturn file_gRpcServer_proto_rawDescGZIP(), []int{60}\n}", "func (*RemoveReqMsg) Descriptor() ([]byte, []int) {\n\treturn file_register_proto_rawDescGZIP(), []int{5}\n}", "func (*MemberLevelDeleteReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{52}\n}", "func (*MessageHubRemoveRequest) Descriptor() ([]byte, []int) {\n\treturn file_messagehub_proto_rawDescGZIP(), []int{7}\n}", "func (*PeerUnregisterRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_protobuf_network_pb2_network_proto_rawDescGZIP(), []int{2}\n}", "func (*KvsDeleteRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_rpc_proto_rawDescGZIP(), []int{7}\n}", "func (*NetworkRemoveByNameRequest) Descriptor() ([]byte, []int) {\n\treturn file_gRpcServer_proto_rawDescGZIP(), []int{61}\n}", "func (*MemberReceiveAddressDeleteReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{88}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use AddKeeperReq.ProtoReflect.Descriptor instead.
func (*AddKeeperReq) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{3} }
[ "func (*RemoveKeeperReq) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{2}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_calculator_proto_calc_proto_rawDescGZIP(), []int{0}\n}", "func (*MemberLevelAddReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{45}\n}", "func (*AddWalletRequest) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_rawDescGZIP(), []int{11}\n}", "func (*CPlayer_AddFriend_Request) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_player_steamworkssdk_proto_rawDescGZIP(), []int{18}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_calculate_proto_rawDescGZIP(), []int{3}\n}", "func (*AddApikeyRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{2}\n}", "func (*AddClusterZookeeperRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_clickhouse_v1_cluster_service_proto_rawDescGZIP(), []int{15}\n}", "func (*AddProducerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{2}\n}", "func (*AddRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_proto_rawDescGZIP(), []int{3}\n}", "func (*AddUserPlus_Request) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{26}\n}", "func (*MemberAddReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{0}\n}", "func (*ModifyRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{10}\n}", "func (*SwitchKeeperReq) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{5}\n}", "func (*UpdateWithdrawRequest) Descriptor() ([]byte, []int) 
{\n\treturn file_services_temporal_service_proto_rawDescGZIP(), []int{4}\n}", "func (*AddLessons_Request) Descriptor() ([]byte, []int) {\n\treturn file_schedule_service_api_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdatePermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{9}\n}", "func (*AddWantRequest) Descriptor() ([]byte, []int) {\n\treturn file_recordwants_proto_rawDescGZIP(), []int{8}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use AssignAck.ProtoReflect.Descriptor instead.
func (*AssignAck) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{4} }
[ "func (*Ack) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha2_broker_custom_proto_rawDescGZIP(), []int{6}\n}", "func (*ActionAck) Descriptor() ([]byte, []int) {\n\treturn file_snake_proto_rawDescGZIP(), []int{8}\n}", "func (*EpochChangeAck) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{22}\n}", "func (*AckRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_acknowledge_proto_rawDescGZIP(), []int{0}\n}", "func (*RequestAck) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{15}\n}", "func (*AckResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_acknowledge_proto_rawDescGZIP(), []int{1}\n}", "func (*MoveAcknowledgment) Descriptor() ([]byte, []int) {\n\treturn file_FillerGame_proto_rawDescGZIP(), []int{4}\n}", "func (*Assignment) Descriptor() ([]byte, []int) {\n\treturn file_api_messages_proto_rawDescGZIP(), []int{2}\n}", "func (*WordAck) Descriptor() ([]byte, []int) {\n\treturn file_messages_management_proto_rawDescGZIP(), []int{22}\n}", "func (*PullFrameFromFlowResponse_Ack) Descriptor() ([]byte, []int) {\n\treturn file_pull_frame_from_flow_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*AckRequest) Descriptor() ([]byte, []int) {\n\treturn file_controls_proto_rawDescGZIP(), []int{2}\n}", "func (*CFriendMessages_AckMessage_Notification) Descriptor() ([]byte, []int) {\n\treturn file_steammessages_friendmessages_steamclient_proto_rawDescGZIP(), []int{6}\n}", "func (*CCLCMsg_BaselineAck) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{17}\n}", "func (*CCLCMsg_BaselineAck) Descriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{17}\n}", "func (*PullFrameFromFlowSetResponse_Ack) Descriptor() ([]byte, []int) {\n\treturn file_pull_frame_from_flow_set_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*AcknowledgeRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_toit_api_pubsub_subscribe_proto_rawDescGZIP(), []int{11}\n}", "func (*AckResponse) Descriptor() ([]byte, []int) {\n\treturn file_controls_proto_rawDescGZIP(), []int{3}\n}", "func (*EventAck) Descriptor() ([]byte, []int) {\n\treturn file_arista_event_v1_event_proto_rawDescGZIP(), []int{2}\n}", "func (*NetworkAcknowledgement) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_network_pb2_network_proto_rawDescGZIP(), []int{8}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use SwitchKeeperReq.ProtoReflect.Descriptor instead.
func (*SwitchKeeperReq) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{5} }
[ "func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{13, 0}\n}", "func (*StopProvider_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{3, 0}\n}", "func (*ControlRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_gateway_v1_control_proto_rawDescGZIP(), []int{0}\n}", "func (*CheckRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_api_servicecontrol_v1_service_controller_proto_rawDescGZIP(), []int{0}\n}", "func (*SwitchRuntimeRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_notebooks_v1_managed_service_proto_rawDescGZIP(), []int{7}\n}", "func (*SVRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_drkey_mgmt_v1_mgmt_proto_rawDescGZIP(), []int{0}\n}", "func (*WatchRequest) Descriptor() ([]byte, []int) {\n\treturn file_authzed_api_v0_watch_service_proto_rawDescGZIP(), []int{0}\n}", "func (*SwitchKeeperRsp) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{6}\n}", "func (*WatchRequestTypeProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{25}\n}", "func (*ConfigRequest_V1_Deprecated) Descriptor() ([]byte, []int) {\n\treturn file_config_opensearch_config_request_proto_rawDescGZIP(), []int{0, 0, 23}\n}", "func (*WatchMetricDescriptorRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{2}\n}", "func (*ModeChangeRequest) Descriptor() ([]byte, []int) {\n\treturn file_simulator_controller_proto_rawDescGZIP(), []int{9}\n}", "func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{8, 0}\n}", "func (*ListenForConnectionsRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha2_broker_custom_proto_rawDescGZIP(), []int{0}\n}", "func (*ChangeRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_management_proto_rawDescGZIP(), []int{2}\n}", "func (*ProbeRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_gateway_v1_control_proto_rawDescGZIP(), []int{2}\n}", "func (*RemoveKeeperReq) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{2}\n}", "func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}", "func (*OpenConnectionChannelSocketRequest) Descriptor() ([]byte, []int) {\n\treturn edgelq_devices_proto_v1alpha2_broker_custom_proto_rawDescGZIP(), []int{2}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use SwitchKeeperRsp.ProtoReflect.Descriptor instead.
func (*SwitchKeeperRsp) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{6} }
[ "func (*LeaveTeamRsp) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_team_LeaveTeam_proto_rawDescGZIP(), []int{1}\n}", "func (SVC_Messages) EnumDescriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{4}\n}", "func (SVC_Messages) EnumDescriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{4}\n}", "func (*TransGrpcRsp) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{8}\n}", "func (*StopProvider_Response) Descriptor() ([]byte, []int) {\n\treturn file_tfplugin6_proto_rawDescGZIP(), []int{3, 1}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*SVResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_drkey_mgmt_v1_mgmt_proto_rawDescGZIP(), []int{1}\n}", "func (*GroupRsp) Descriptor() ([]byte, []int) {\n\treturn file_chatMsg_msg_proto_rawDescGZIP(), []int{6}\n}", "func (*SVRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_drkey_mgmt_v1_mgmt_proto_rawDescGZIP(), []int{0}\n}", "func (*ChangeInfoRsp) Descriptor() ([]byte, []int) {\n\treturn file_Auth_Auth_proto_rawDescGZIP(), []int{5}\n}", "func (*GetRsp) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{1}\n}", "func (StatusMessage_Reference) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_devtools_clouddebugger_v2_data_proto_rawDescGZIP(), []int{1, 0}\n}", "func (ComponentUpgrade_ReverificationStatus) EnumDescriptor() ([]byte, []int) {\n\treturn file_pkg_samsahai_rpc_service_proto_rawDescGZIP(), []int{6, 2}\n}", "func (CLC_Messages) EnumDescriptor() ([]byte, []int) {\n\treturn file_csgo_netmessages_proto_rawDescGZIP(), []int{1}\n}", "func (*Listen) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{4}\n}", "func (SpanLayer) EnumDescriptor() ([]byte, []int) {\n\treturn file_language_agent_Tracing_proto_rawDescGZIP(), []int{2}\n}", 
"func (ListenResponse_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_listen_proto_rawDescGZIP(), []int{1, 0}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*WatchMetricDescriptorResponse) Descriptor() ([]byte, []int) {\n\treturn edgelq_monitoring_proto_v3_metric_descriptor_service_proto_rawDescGZIP(), []int{3}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use ClusterRsp.ProtoReflect.Descriptor instead.
func (*ClusterRsp) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{8} }
[ "func (*UnregisterClusterResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_grpc_proto_cluster_cluster_proto_rawDescGZIP(), []int{4}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*GroupRsp) Descriptor() ([]byte, []int) {\n\treturn file_chatMsg_msg_proto_rawDescGZIP(), []int{6}\n}", "func (*UnregisterClusterRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_grpc_proto_cluster_cluster_proto_rawDescGZIP(), []int{3}\n}", "func (*AddPeerResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{30}\n}", "func (*StopClusterRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_greenplum_v1_cluster_service_proto_rawDescGZIP(), []int{11}\n}", "func (*DiscoveryResponse) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{2}\n}", "func (*LeaveTeamRsp) Descriptor() ([]byte, []int) {\n\treturn file_api_proto_team_LeaveTeam_proto_rawDescGZIP(), []int{1}\n}", "func (*DiscoveryRequest) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_simplified_proto_rawDescGZIP(), []int{1}\n}", "func (*TransGrpcRsp) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{8}\n}", "func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}", "func (*GetRsp) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{1}\n}", "func (*StopClusterRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_clickhouse_v1_cluster_service_proto_rawDescGZIP(), []int{11}\n}", "func (*GetPeerInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{28}\n}", "func (*QueryPlanStatusResponseProto) Descriptor() ([]byte, 
[]int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{25}\n}", "func (*RefreshNamenodesResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{3}\n}", "func (*GetClusterInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_uber_cadence_api_v1_service_workflow_proto_rawDescGZIP(), []int{21}\n}", "func (*CancelPlanResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{23}\n}", "func (*SwitchKeeperRsp) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{6}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use RegisterNodeReq.ProtoReflect.Descriptor instead.
func (*RegisterNodeReq) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{9} }
[ "func (*RegisterClusterNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{2}\n}", "func (*NodeGroupForNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{4}\n}", "func (*SetNodeConfigRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{47}\n}", "func (*RegisterNodeRsp) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{10}\n}", "func (*UpdateNodeDNSRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{49}\n}", "func (*UpgradeNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{24}\n}", "func (*RegisterVerifierRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_trust_registry_v1_trust_registry_proto_rawDescGZIP(), []int{9}\n}", "func (*UpdateNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{11}\n}", "func (*PeerRegisterRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_network_pb2_network_proto_rawDescGZIP(), []int{1}\n}", "func (*GetNodeSelectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_spire_api_registration_registration_proto_rawDescGZIP(), []int{22}\n}", "func (*UpdateNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{6}\n}", "func (*RegisterRequest_SecondaryNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_supernode_proto_rawDescGZIP(), []int{2, 1}\n}", "func (*CreateNodeMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_metrics_services_proto_rawDescGZIP(), []int{4}\n}", "func (*UpdateNSNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_ns_node_proto_rawDescGZIP(), []int{12}\n}", "func (*InstallNodeRequest) 
Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{22}\n}", "func (*CreateNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{0}\n}", "func (*RegistrationAddReq) Descriptor() ([]byte, []int) {\n\treturn file_registration_proto_rawDescGZIP(), []int{11}\n}", "func (*RegisterRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_engine_proto_rawDescGZIP(), []int{6}\n}", "func (*CreateNSNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_ns_node_proto_rawDescGZIP(), []int{7}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deprecated: Use RegisterNodeRsp.ProtoReflect.Descriptor instead.
func (*RegisterNodeRsp) Descriptor() ([]byte, []int) { return file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{10} }
[ "func (*RegisterNodeReq) Descriptor() ([]byte, []int) {\n\treturn file_Assigneer_Assigneer_proto_rawDescGZIP(), []int{9}\n}", "func (*RegisterReply_SecondaryNodeReply) Descriptor() ([]byte, []int) {\n\treturn file_supernode_proto_rawDescGZIP(), []int{3, 1}\n}", "func (*RegisterClusterNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{3}\n}", "func (*RegisterVerifierRequest) Descriptor() ([]byte, []int) {\n\treturn file_services_trust_registry_v1_trust_registry_proto_rawDescGZIP(), []int{9}\n}", "func (*RegisterVerifierResponse) Descriptor() ([]byte, []int) {\n\treturn file_services_trust_registry_v1_trust_registry_proto_rawDescGZIP(), []int{10}\n}", "func (*SynMemberRsp) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{4}\n}", "func (*NodeGroupForNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{5}\n}", "func (*RegisterRequest_SecondaryNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_supernode_proto_rawDescGZIP(), []int{2, 1}\n}", "func (*RemoveMemberRsp) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{12}\n}", "func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}", "func (*UpgradeNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{24}\n}", "func (*UpgradeNodeResponse) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{25}\n}", "func (*CreateFriendRsp) Descriptor() ([]byte, []int) {\n\treturn file_v1_friend_friend_proto_rawDescGZIP(), []int{1}\n}", "func (*NodeGroupForNodeRequest) Descriptor() ([]byte, []int) {\n\treturn file_cloudprovider_externalgrpc_protos_externalgrpc_proto_rawDescGZIP(), []int{4}\n}", "func (*SetNodeConfigRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{47}\n}", "func (*Registration) Descriptor() ([]byte, []int) {\n\treturn file_protocol_rpc_rpc_proto_rawDescGZIP(), []int{63}\n}", "func (*UnregisterVerifierResponse) Descriptor() ([]byte, []int) {\n\treturn file_services_trust_registry_v1_trust_registry_proto_rawDescGZIP(), []int{14}\n}", "func (*UpdateNodeDNSRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_node_proto_rawDescGZIP(), []int{49}\n}", "func (*TransGrpcRsp) Descriptor() ([]byte, []int) {\n\treturn file_inner_proto_rawDescGZIP(), []int{8}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deserializes your encoded data to tree.
func (this *Codec) deserialize(data string) *TreeNode { data = data[1 : len(data)-1] this.data = strings.Split(data, ",") n := this.d() return n }
[ "func (this *Codec) deserialize(data string) *TreeNode {\n\t serial := strings.Split(data, \" \")\n\t root, _ := deserializeTree(serial, 0)\n\t return root\n }", "func (this *Codec) deserialize(data string) *TreeNode {\n\tlist := strings.Split(data, \",\")\n\treturn buildTree(&list)\n}", "func (this *Codec) deserialize(data string) *TreeNode { \n \n if len(data) == 0 {\n return nil\n }\n \n if ! strings.Contains(data, \"[\") {\n num, _ := strconv.Atoi(data)\n root := TreeNode{\n Val: num,\n }\n return &root\n }\n \n l, r := strings.Index(data, \"[\"), strings.LastIndex(data, \"]\")\n \n num, _ := strconv.Atoi(data[:l])\n root := TreeNode{\n Val: num,\n }\n \n children := helper(data[l+1:r])\n \n left_str, ok := children[\"left\"]\n if ok {\n root.Left = this.deserialize(left_str)\n }\n \n right_str, ok := children[\"right\"]\n if ok {\n root.Right = this.deserialize(right_str)\n }\n \n return &root\n \n}", "func main() {\n\tcodec := Constructor()\n\troot := &TreeNode{3, &TreeNode{4, &TreeNode{6, nil, nil}, nil},\n\t\t&TreeNode{5, &TreeNode{7, nil, nil}, nil}}\n\tdata := codec.serialize(root)\n\tprintln(data)\n\tnewRoot := codec.deserialize(data)\n\tprintln(codec.serialize(newRoot))\n}", "func (ths *TreeHashStack) Deserialize(data []byte) error {\n\treturn gob.NewDecoder(bytes.NewBuffer(data)).Decode(ths)\n}", "func DecodeTree(na ipld.NodeAssembler, rd *bufio.Reader) error {\n\tif _, err := readNullTerminatedNumber(rd); err != nil {\n\t\treturn err\n\t}\n\n\tt := Type.Tree__Repr.NewBuilder()\n\tma, err := t.BeginMap(-1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tname, node, err := DecodeTreeEntry(rd)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tee, err := ma.AssembleEntry(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = ee.AssignNode(node); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := ma.Finish(); err != nil {\n\t\treturn err\n\t}\n\treturn na.AssignNode(t.Build())\n}", 
"func (t *Tree) Decode(o core.Object) (err error) {\n\tif o.Type() != core.TreeObject {\n\t\treturn ErrUnsupportedObject\n\t}\n\n\tt.Hash = o.Hash()\n\tif o.Size() == 0 {\n\t\treturn nil\n\t}\n\n\tt.Entries = nil\n\tt.m = nil\n\n\treader, err := o.Reader()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer checkClose(reader, &err)\n\n\tr := bufio.NewReader(reader)\n\tfor {\n\t\tmode, err := r.ReadString(' ')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\tfm, err := strconv.ParseInt(mode[:len(mode)-1], 8, 32)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\n\t\tname, err := r.ReadString(0)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn err\n\t\t}\n\n\t\tvar hash core.Hash\n\t\tif _, err = io.ReadFull(r, hash[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbaseName := name[:len(name)-1]\n\t\tt.Entries = append(t.Entries, TreeEntry{\n\t\t\tHash: hash,\n\t\t\tMode: os.FileMode(fm),\n\t\t\tName: baseName,\n\t\t})\n\t}\n\n\treturn nil\n}", "func main() {\n\troot := &TreeNode{\n\t\tVal: 2,\n\t\tLeft: &TreeNode{\n\t\t\tVal: 1,\n\t\t},\n\t\tRight: &TreeNode{\n\t\t\tVal: 3,\n\t\t},\n\t}\n\tprintTreeNodeByDFS(root)\n\tfmt.Println()\n\n\tser := Constructor()\n\ttreeString := ser.serialize(root)\n\tfmt.Println(treeString)\n\tans := ser.deserialize(treeString)\n\t// printTreeNodeByBFS(ans)\n\tprintTreeNodeByDFS(ans)\n\tfmt.Println()\n}", "func parseTreeData(data []byte) (*Tree, error) {\n\ttree := new(Tree)\n\ttree.TreeEntries = make([]*TreeEntry, 0, 10)\n\tl := len(data)\n\tpos := 0\n\tfor pos < l {\n\t\tte := new(TreeEntry)\n\t\tspacepos := bytes.IndexByte(data[pos:], ' ')\n\t\tswitch string(data[pos : pos+spacepos]) {\n\t\tcase \"100644\":\n\t\t\tte.Filemode = FileModeBlob\n\t\t\tte.Type = ObjectBlob\n\t\tcase \"100755\":\n\t\t\tte.Filemode = FileModeBlobExec\n\t\t\tte.Type = ObjectBlob\n\t\tcase \"120000\":\n\t\t\tte.Filemode = FileModeSymlink\n\t\t\tte.Type = ObjectBlob\n\t\tcase 
\"160000\":\n\t\t\tte.Filemode = FileModeCommit\n\t\t\tte.Type = ObjectCommit\n\t\tcase \"40000\":\n\t\t\tte.Filemode = FileModeTree\n\t\t\tte.Type = ObjectTree\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"unknown type: \" + string(data[pos:pos+spacepos]))\n\t\t}\n\t\tpos += spacepos + 1\n\t\tzero := bytes.IndexByte(data[pos:], 0)\n\t\tte.Name = string(data[pos : pos+zero])\n\t\tpos += zero + 1\n\t\toid, err := NewOid(data[pos : pos+20])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tte.Id = oid\n\t\tpos = pos + 20\n\t\ttree.TreeEntries = append(tree.TreeEntries, te)\n\t}\n\treturn tree, nil\n}", "func decodeV1(data string) (tree.Tree, error) {\n\tvar storage fsStorage\n\tb, err := base64.StdEncoding.DecodeString(data)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"decoding base64\")\n\t}\n\tvar r io.ReadCloser\n\tr, err = gzip.NewReader(bytes.NewReader(b))\n\tif err != nil {\n\t\t// Fallback to non-zipped version.\n\t\tlog.Printf(\n\t\t\t\"Decoding gzip: %s. Falling back to non-gzip loading.\",\n\t\t\terr)\n\t\tr = ioutil.NopCloser(bytes.NewReader(b))\n\t}\n\tdefer r.Close()\n\terr = gob.NewDecoder(r).Decode(&storage)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"decoding gob\")\n\t}\n\tt := make(tree.Tree)\n\tfor dir := range storage.Dirs {\n\t\tt.AddDir(dir)\n\t}\n\tfor path, content := range storage.Files {\n\t\tt.AddFileContent(path, content)\n\t}\n\treturn t, err\n}", "func (st *SplayTree) UnmarshalBinary(data []byte) (err error) {\n\tdec := gob.NewDecoder(bytes.NewReader(data))\n\n\t// Type of Engine\n\tvar typeOfEngine string\n\tif err = dec.Decode(&typeOfEngine); err != nil || typeOfEngine != \"Engine:filtering.SplayTree\" {\n\t\treturn errors.New(\"Wrong Filtering Engine: \" + typeOfEngine)\n\t}\n\n\t// Decode SplayTreeNode\n\terr = dec.Decode(&st.root)\n\n\t// tdt.Core\n\tst.tdtCore = tdt.NewCore()\n\n\treturn\n}", "func (leaf *Node) Decode() ([]byte, error) {\n\tif len(leaf.ContentEncoding) == 0 {\n\t\tleaf.plainv = 
leaf.V\n\t\treturn leaf.plainv, nil\n\t}\n\n\tleaf.plainv = leaf.plainv[:0]\n\n\tif leaf.ContentEncoding == EncodingGzip {\n\t\tr, err := gzip.NewReader(bytes.NewReader(leaf.V))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tbuf := make([]byte, 1024)\n\t\tfor {\n\t\t\tn, err := r.Read(buf)\n\t\t\tif n > 0 {\n\t\t\t\tleaf.plainv = append(leaf.plainv, buf[:n]...)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbuf = buf[0:]\n\t\t}\n\t}\n\n\treturn leaf.plainv, nil\n}", "func (d *DirTree) Load(reader io.Reader) error {\n\tzreader, err := gzip.NewReader(reader)\n\tif err != nil {\n\t\treturn (fmt.Errorf(\"Error uncompressing state DB: %q\", err))\n\t}\n\tdefer zreader.Close()\n\n\tdec := json.NewDecoder(zreader)\n\terr = dec.Decode(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (stn *SplayTreeNode) UnmarshalBinary(data []byte) (err error) {\n\tdec := gob.NewDecoder(bytes.NewReader(data))\n\n\t// reportURI\n\tif err = dec.Decode(&stn.reportURI); err != nil {\n\t\treturn\n\t}\n\n\t// FilterObject\n\tvar hasFilterObject bool\n\tif err = dec.Decode(&hasFilterObject); err != nil {\n\t\treturn\n\t}\n\tif hasFilterObject {\n\t\terr = dec.Decode(&stn.filterObject)\n\t}\n\n\t// matchNext\n\tvar hasMatchNext bool\n\tif err = dec.Decode(&hasMatchNext); err != nil {\n\t\treturn\n\t}\n\tif hasMatchNext {\n\t\terr = dec.Decode(&stn.matchNext)\n\t} else {\n\t\tstn.matchNext = nil\n\t}\n\n\t// mismatchNext\n\tvar hasMismatchNext bool\n\tif err = dec.Decode(&hasMismatchNext); err != nil {\n\t\treturn\n\t}\n\tif hasMismatchNext {\n\t\terr = dec.Decode(&stn.mismatchNext)\n\t} else {\n\t\tstn.mismatchNext = nil\n\t}\n\n\treturn\n}", "func (n *Node) Unmarshal(encoded []byte) error {\n\tvar pbn pb.PBNode\n\tif err := pbn.Unmarshal(encoded); err != nil {\n\t\treturn fmt.Errorf(\"Unmarshal failed. 
%v\", err)\n\t}\n\n\tpbnl := pbn.GetLinks()\n\tn.Links = make([]*Link, len(pbnl))\n\tfor i, l := range pbnl {\n\t\tn.Links[i] = &Link{Name: l.GetName(), Size: l.GetTsize()}\n\t\th, err := mh.Cast(l.GetHash())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Link hash is not valid multihash. %v\", err)\n\t\t}\n\t\tn.Links[i].Hash = h\n\t}\n\tsort.Stable(LinkSlice(n.Links)) // keep links sorted\n\n\tn.Data = pbn.GetData()\n\treturn nil\n}", "func (s *NodeKeySignature) Unserialize(data []byte) error {\n\tdec, _ := cborDecOpts.DecMode()\n\treturn dec.Unmarshal(data, s)\n}", "func main() {\n\troot := TreeNode{\n\t\tVal: 1,\n\t\tLeft: &TreeNode{\n\t\t\tVal: 2,\n\t\t\tLeft: nil,\n\t\t\tRight: nil,\n\t\t},\n\t\tRight: &TreeNode{\n\t\t\tVal: 3,\n\t\t\tLeft: &TreeNode{\n\t\t\t\tVal: 4,\n\t\t\t\tRight: nil,\n\t\t\t\tLeft: nil,\n\t\t\t},\n\t\t\tRight: &TreeNode{\n\t\t\t\tVal: 5,\n\t\t\t\tRight: nil,\n\t\t\t\tLeft: nil,\n\t\t\t},\n\t\t},\n\t}\n\tobj := Constructor()\n\tdata := obj.serialize(&root)\n\tfmt.Println(data)\n}", "func (d *decoder) createTree() *node {\n\tif val, _ := readBit(d.r); val {\n\t\treturn &node{readByte(d.r), -1, false, nil, nil}\n\t} else if d.numChars != d.numCharsDecoded {\n\t\tleft := d.createTree()\n\t\tright := d.createTree()\n\t\treturn &node{0, -1, true, left, right}\n\t}\n\n\treturn nil\n}", "func Decoded(encoded []byte) (*Node, error) {\n\tn := new(Node)\n\terr := n.Unmarshal(encoded)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"incorrectly formatted merkledag node: %s\", err)\n\t}\n\treturn n, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultCreateHealthMenstruationPersonalInfo executes a basic gorm create call
func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) { if in == nil { return nil, errors1.NilArgumentError } ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok { if db, err = hook.BeforeCreate_(ctx, db); err != nil { return nil, err } } if err = db.Create(&ormObj).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok { if err = hook.AfterCreate_(ctx, db); err != nil { return nil, err } } pbResponse, err := ormObj.ToPB(ctx) return &pbResponse, err }
[ "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultCreateUserInfo(ctx context.Context, in *UserInfo, db *gorm.DB) (*UserInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(UserInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(UserInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func createPerson(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"CREATE HIT\")\n\tstmt, err := db.Prepare(\"INSERT INTO Persons(pAge, pName) VALUES (?,?)\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tvar per Person\n\tjson.Unmarshal(body, &per)\n\tage := per.Age\n\tname := per.Name\n\t_, err = stmt.Exec(age, name)\n\tif err != nil 
{\n\t\tpanic(err.Error())\n\t}\n\tfmt.Fprintf(w, \"New person was created\")\n}", "func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationPersonalInfoORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationPersonalInfoORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func CreatePerson(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\tvar p Person\n\terr := decoder.Decode(&p)\n\tcheck(\"Json Decorder Failure\", err)\n\ttopID += 1\n\tui := strconv.Itoa(topID)\n\tKeyStore[0] = Person{ui, \"-first-\", \"-last-\", \"-email-\", \"-phone-\"}\n\tnp := Person{ui, p.FirstName, p.LastName, p.EmailAddr, p.PhoneNumb}\n\tKeyStore[topID] = np\n\tsaveDatabase()\n}", "func CreatePsychologist(dbase *gorm.DB, w http.ResponseWriter, r *http.Request) {\n\tuser := &db.Psychologist{}\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif 
err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_ = json.NewEncoder(w).Encode(utils.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: \"An error occurred\",\n\t\t})\n\t\treturn\n\t}\n\n\tuser.Password = utils.HashPassword(user.Password, w)\n\tif user.Password == \"\" {\n\t\treturn\n\t}\n\n\trs := dbase.Create(&user)\n\tif rs.Error != nil {\n\t\tlog.Println(rs)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tlog.Println(json.NewEncoder(w).Encode(utils.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: \"Could not create your account. Please try again later\",\n\t\t}))\n\t\treturn\n\t}\n\n\t// body := struct {\n\t// \tName string\n\t// \tLink string\n\t// }{\n\t// \tName: fmt.Sprintf(\"%s %s\", user.FirstName, user.LastName),\n\t// \tLink: \"https://google.com\",\n\t// }\n\n\t// go func(dbase *gorm.DB, email string, subject string, HTMLTemp string, body interface{}) {\n\t// \terr := utils.SendEmail(dbase, email, subject, HTMLTemp, body)\n\t// \tif err != nil {\n\t// \t\tlog.Println(err)\n\t// \t\t_ = json.NewEncoder(w).Encode(err.Error())\n\t// \t\treturn\n\t// \t}\n\t// }(dbase, user.Email, \"Welcome\", \"templates/email/confirm.html\", body)\n\n\tw.WriteHeader(http.StatusCreated)\n\tlog.Println(json.NewEncoder(w).Encode(user))\n}", "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, 
&HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func HCreateFirstTimeUser(c *gbl.Context) {\n\tmapi := c.Integration.(*fb.MessengerIntegration)\n\n\tuserInfo, err := mapi.API.UserInfo(c.User.ID)\n\tif err != nil {\n\t\tc.Log(10, fmt.Sprintf(\"Error getting user data from fb %v\", err), \"UserExtractor\")\n\t\tc.Next()\n\t\treturn\n\t}\n\n\tinfoMap := userInfo.(map[string]interface{})\n\n\tc.Infof(\"%+v\", infoMap)\n\n\tfirstName := infoMap[\"first_name\"].(string)\n\tlastName := infoMap[\"last_name\"].(string)\n\n\tc.User.FirstName = firstName\n\tc.User.LastName = lastName\n\n\tuser := bdb.User{\n\t\tID: c.User.ID,\n\t\tFirstName: &firstName,\n\t\tLastName: &lastName,\n\t}\n\n\tc.Flag(\"user\", &user)\n\n\t_, err = bdb.DB(c).\n\t\tInsertInto(\"users\").\n\t\tColumns(\"id\", \"first_name\", \"last_name\").\n\t\tValues(c.User.ID, c.User.FirstName, c.User.LastName).\n\t\tExec()\n\tif err != nil {\n\t\tc.Errorf(\"Erorr creating user %v\", err)\n\t}\n\n\tc.Next()\n}", "func (h *Handler) createDeveloper(c *gin.Context) 
handlerResponse {\n\n\tvar newDeveloper types.Developer\n\tif err := c.ShouldBindJSON(&newDeveloper); err != nil {\n\t\treturn handleBadRequest(err)\n\t}\n\tstoredDeveloper, err := h.service.Developer.Create(newDeveloper, h.who(c))\n\tif err != nil {\n\t\treturn handleError(err)\n\t}\n\treturn handleCreated(storedDeveloper)\n}", "func CreateMeeting(c *gin.Context) {\n // Validate input\n var input CreateMeetingInput\n if err := c.ShouldBindJSON(&input); err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n return\n }\n\n // Create meeting\n meeting := models.Meeting{CreatedBy: input.CreatedBy, Title: input.Title, Description: input.Description, StartDate: input.StartDate, EndDate: input.EndDate, Location: input.Location}\n models.DB.Create(&meeting)\n\n c.JSON(http.StatusOK, gin.H{\"data\": meeting})\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := 
interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func (a *App) CreateProfile(w http.ResponseWriter, r *http.Request) {\n\thandler.CreateProfile(a.DB, w, r)\n}", "func createPerson(name string) *Person {\n\n\tvar status string\n\tvar hobby string\n\tvar age int\n\n\t// Look at the value for name and process accordingly\n\tswitch name {\n\tcase \"Joey\":\n\t\tstatus = \"Single\"\n\t\thobby = \"Playing baseball\"\n\t\tage = 58\n\tcase \"John\":\n\t\tstatus = \"Married\"\n\t\thobby = \"Rocking out\"\n\t\tage = 35\n\tcase \"Jimmy\":\n\t\tstatus = \"Single\"\n\t\thobby = \"Playing guitar\"\n\t\tage = 25\n\tcase \"Devon\":\n\t\tstatus = \"Married\"\n\t\thobby = \"Reading comics\"\n\t\tage = 27\n\tdefault:\n\t\tfmt.Println(\"Enter a valid name.\")\n\t}\n\n\t// Invoke the Person struct and fill out according to name\n\tinvokePerson := Person{name: name, status: status, hobby: hobby, age: age}\n\t// Return a Person struct when finished\n\treturn &invokePerson\n}", "func LanguageproficiencyCreate(c *gin.Context) {\n\t// Assign languageproficiency var to model.Languageproficiency so we can bind it with BindJSON\n\tvar languageproficiency model.Languageproficiency\n\n\t// Binding JSON value to languageproficiency\n\tif err := c.BindJSON(&languageproficiency); err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, model.JSONResults{\n\t\t\tStatus: http.StatusBadRequest,\n\t\t\tMessage: model.Message{\n\t\t\t\tError: 
err.Error(),\n\t\t\t},\n\t\t})\n\t\treturn\n\t}\n\n\t// Validate with govalidator\n\t_, err := validator.ValidateStruct(languageproficiency)\n\tif err != nil {\n\t\tc.AbortWithStatusJSON(http.StatusBadRequest, model.JSONResults{\n\t\t\tStatus: http.StatusBadRequest,\n\t\t\tMessage: model.Message{\n\t\t\t\tError: err.Error(),\n\t\t\t},\n\t\t})\n\t\treturn\n\t}\n\n\t// Check if the user already have a languageproficiency in our system, then set nothing\n\t// Otherwise create a new row for the user\n\tif dbc := db.Create(&languageproficiency); dbc.Error != nil {\n\t\t// Create failed, do something e.g. return, panic etc.\n\t\t// db.Create(&languageproficiency)\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, model.JSONResults{\n\t\t\tStatus: http.StatusInternalServerError,\n\t\t\tMessage: model.Message{\n\t\t\t\tError: dbc.Error.Error(),\n\t\t\t},\n\t\t})\n\t\treturn\n\t}\n\thelper.Log.Println(\"Languageproficiency for : \" + string(languageproficiency.UserID) + \" got created\")\n\n\t// Return the languageproficiency that login\n\tc.JSON(http.StatusOK, model.JSONResults{\n\t\tStatus: http.StatusOK,\n\t\tResult: languageproficiency,\n\t\tMessage: model.Message{\n\t\t\tSuccess: \"Languageproficiency \" + config.SuccessCreate,\n\t\t},\n\t})\n}", "func (m *MeetupsRepo) Create(meetup *models.Meetup) (*models.Meetup, error) {\n _, err := m.DB.Model(meetup).Returning(\"*\").Insert()\n return meetup, err\n}", "func DefaultCreateProfile(ctx context.Context, in *Profile, db *gorm1.DB) (*Profile, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithAfterCreate_); ok {\n\t\tif 
err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func CreateNewPatient(c *gin.Context) {\n\tvar patientRequest PatientRequest\n\tc.ShouldBind(&patientRequest)\n\tpatientResponse := PatientResponse{\n\t\tPatientID: \"2018-0001\",\n\t\tName: patientRequest.Name,\n\t\tLastname: patientRequest.Lastname,\n\t\tAge: patientRequest.Age,\n\t}\n\tc.JSON(201, patientResponse)\n\n}", "func (m *HealthMenstruationPersonalInfo) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationPersonalInfoValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationPersonalInfoValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\t// no validation rules for PeriodLengthInDays\n\n\t// no validation rules for CycleLengthInDays\n\n\treturn nil\n}", "func NewPersonal(db DBProvider) (*Personal, error) {\n\treturn &Personal{\n\t\tDB: db,\n\t}, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultReadHealthMenstruationPersonalInfo executes a basic gorm read call
func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) { if in == nil { return nil, errors1.NilArgumentError } ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } if ormObj.Id == 0 { return nil, errors1.EmptyIdError } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok { if db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil { return nil, err } } if db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationPersonalInfoORM{}); err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok { if db, err = hook.BeforeReadFind(ctx, db); err != nil { return nil, err } } ormResponse := HealthMenstruationPersonalInfoORM{} if err = db.Where(&ormObj).First(&ormResponse).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok { if err = hook.AfterReadFind(ctx, db); err != nil { return nil, err } } pbResponse, err := ormResponse.ToPB(ctx) return &pbResponse, err }
[ "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif 
hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationDailyEntryORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultReadProfile(ctx context.Context, in *Profile, db *gorm1.DB) (*Profile, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tif ormObj.Id == \"\" {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &ProfileORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := ProfileORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(ProfileORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil 
{\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func (t *SimpleChaincode) hospital_read(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\n\tif len(args) != 1 {\n\t\treturn nil, errors.New(\"Gimme more arguments, 1 to be exact\")\n\t}\n\tbloodTestList, err := stub.GetState(bloodTestIndex)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get bloodList\")\n\t}\n\tvar bloodInd []string\n\n\terr = json.Unmarshal(bloodTestList, &bloodInd)\n\tif err != nil {\n\t\tfmt.Println(\"you dun goofed\")\n\t}\n\n\tvar bloodAsBytes []byte\n\tvar finalList []byte = []byte(`\"returnedObjects\":[`)\n\tres := bloodTest{}\n\tfor i := range bloodInd {\n\n\t\tbloodAsBytes, err = stub.GetState(bloodInd[i])\n\t\tjson.Unmarshal(bloodAsBytes, &res)\n\t\tif res.Hospital == args[0] {\n\n\t\t\tfinalList = append(finalList, bloodAsBytes...)\n\n\t\t\tif i < (len(bloodInd) - 1) {\n\t\t\t\tfinalList = append(finalList, []byte(`,`)...)\n\t\t\t}\n\t\t}\n\t}\n\tfinalList = append(finalList, []byte(`]`)...)\n\n\treturn finalList, nil\n}", "func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationPersonalInfo\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := 
&HealthMenstruationPersonalInfoORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultReadContact(ctx context.Context, in *Contact, db *gorm1.DB) (*Contact, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultReadContact\")\n\t}\n\tormParams := ConvertContactToORM(*in)\n\tormResponse := ContactORM{}\n\tif err := db.Set(\"gorm:auto_preload\", true).Where(&ormParams).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tpbResponse := ConvertContactFromORM(ormResponse)\n\treturn &pbResponse, nil\n}", "func DefaultReadContact(ctx context.Context, in *Contact, db *gorm.DB) (*Contact, error) {\n\tif in == nil {\n\t\treturn nil, errors.New(\"Nil argument to DefaultReadContact\")\n\t}\n\tormParams, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taccountID, err := auth.GetAccountID(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tormParams.AccountID = accountID\n\tormResponse := ContactORM{}\n\tif err = db.Set(\"gorm:auto_preload\", true).Where(&ormParams).First(&ormResponse).Error; err != nil {\n\t\treturn nil, 
err\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func (c *UserRepoImpl) Read(id int) (*model.User, error) {\n\tuser := new(model.User)\n\n\tif err := c.db.Table(\"user\").Where(\"user_id = ?\", id).First(&user).Error; err != nil {\n\t\tlogrus.Error(err)\n\t\treturn nil, errors.New(\"get user data : error \")\n\t}\n\n\treturn user, nil\n}", "func (a *API) Read(rw http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tgithubID := vars[\"github\"]\n\n\tcoll := a.Connection.Database(a.DBName).Collection(a.CollectionName)\n\tr := coll.FindOne(context.Background(), bson.M{githubIDAttribute: githubID})\n\n\tvar failed string\n\tif r.Err() != nil && r.Err() == mongo.ErrNoDocuments {\n\t\tfailed = \"developer profile with github ID \" + githubID + \" does not exist\"\n\t\trw.WriteHeader(http.StatusNotFound)\n\t\tlog.Println(failed, r.Err())\n\t\tfmt.Fprintln(rw, failed)\n\t\treturn\n\t} else if r.Err() != nil {\n\t\tfailed = \"operation failed\"\n\t\tlog.Println(r.Err())\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintln(rw, failed)\n\t\treturn\n\t}\n\tvar p model.Developer\n\tr.Decode(&p)\n\tjson.NewEncoder(rw).Encode(&p)\n}", "func DefaultReadContact(ctx context.Context, in *Contact, db *gorm.DB) (*Contact, error) {\n\tif in == nil {\n\t\treturn nil, errors.New(\"Nil argument to DefaultReadContact\")\n\t}\n\tormParams, err := ConvertContactToORM(*in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tormResponse := ContactORM{}\n\tif err = db.Set(\"gorm:auto_preload\", true).Where(&ormParams).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tpbResponse, err := ConvertContactFromORM(ormResponse)\n\treturn &pbResponse, err\n}", "func (m *ContactsDefaultServer) CustomRead(ctx context.Context, req *ReadContactRequest) (*ReadContactResponse, error) {\n\tres, err := DefaultReadContact(ctx, &Contact{Id: req.GetId()}, m.DB)\n\tif err != nil {\n\t\tst := status.Newf(codes.Internal, \"Unable to 
read contact. Error %v\", err)\n\t\tst, _ = st.WithDetails(errdetails.New(codes.Internal, \"CustomRead\", \"Custom error message\"))\n\t\tst, _ = st.WithDetails(errdetails.New(codes.Internal, \"CustomRead\", \"Another custom error message\"))\n\t\treturn nil, st.Err()\n\t}\n\treturn &ReadContactResponse{Result: res}, nil\n}", "func GetHealth(w http.ResponseWriter, r *http.Request, db *sqlx.DB) {\n\tparams := mux.Vars(r)\n\n\thealth := []Health{}\n\n\tvar err error\n\n\tsession, err := store.Get(r, \"auth\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Convert our session data into an instance of User\n\tuser := User{}\n\tuser, _ = session.Values[\"user\"].(User)\n\n\tif user.Username != \"\" && user.AccessLevel == \"admin\" {\n\t\tif _, ok := params[\"id\"]; ok {\n\t\t\terr = db.Select(&health, \"SELECT id, username, ts, variable, value \"+\n\t\t\t\t\"FROM public.health \"+\n\t\t\t\t\"WHERE id = $1 \", params[\"id\"])\n\t\t} else if _, ok = params[\"ts\"]; ok {\n\t\t\terr = db.Select(&health, \"SELECT id, username, ts, variable, value \"+\n\t\t\t\t\"FROM public.health \"+\n\t\t\t\t\"WHERE ts = $1 \", params[\"ts\"])\n\t\t} else if _, ok = params[\"variable\"]; ok {\n\t\t\terr = db.Select(&health, \"SELECT id, username, ts, variable, value \"+\n\t\t\t\t\"FROM public.health \"+\n\t\t\t\t\"WHERE variable = $1 \", params[\"variable\"])\n\t\t} else {\n\t\t\terr = db.Select(&health, \"SELECT id, username, ts, variable, value \"+\n\t\t\t\t\"FROM public.health \")\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\tif err := json.NewEncoder(w).Encode(health); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\n\t\tif err := json.NewEncoder(w).Encode(\"access denied\"); err != nil 
{\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tlogRequest(r)\n}", "func ReadHandler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Pass the call to the model with params found in the path\n\tfmt.Println(\"Path vars: \", request.PathParameters[\"id\"])\n\tuser, err := user.Read(request.PathParameters[\"id\"])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to find user, %v\", err))\n\t}\n\n\t// Make sure the user isn't empty\n\tif uuid.Must(uuid.FromString(user.ID)) == uuid.Nil {\n\t\tfmt.Println(\"Could not find user\")\n\t\treturn events.APIGatewayProxyResponse{Body: request.Body, StatusCode: 500}, nil\n\t}\n\n\t// Log and return result\n\tjsonItem, _ := json.MarshalIndent(user, \"\", \" \")\n\tstringItem := string(jsonItem)\n\tfmt.Println(\"Found item: \", stringItem)\n\treturn events.APIGatewayProxyResponse{Body: stringItem, StatusCode: 200}, nil\n}", "func Read(w http.ResponseWriter, r *http.Request) {\n\n\t//use above functions to parse string and find user\n\tuName := GetQueryString(w, r)\n\n\tuser, findErr := userOps.FindUserByUsername(uName)\n\n\t//handle err\n\tif findErr != nil {\n\t\tw.WriteHeader(404)\n\t\tw.Write([]byte(\"Not found\"))\n\t\treturn\n\t}\n\n\t//Success, 200 response\n\tw.WriteHeader(200)\n\tw.Write([]byte(user.Username))\n}", "func (m *Master) ReadInfoFromFile(filepath string) error {\n\t// open file\n\tfile, err := os.OpenFile(filepath, os.O_RDONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// close file\n\tdefer file.Close()\n\n\t// check file size\n\t// stats, err := file.Stat()\n\t// if err != nil {\n\t// \treturn err\n\t// }\n\t// fileSize := stats.Size()\n\t// buf := make([]byte, fileSize)\n\n\t// read file\n\tdataLenBytes := make([]byte, 8)\n\tuserTypeBytes := make([]byte, 1)\n\tfor {\n\t\t// data len\n\t\tvar dataLen int64 = 0 // data len\n\t\tn, err := io.ReadFull(file, dataLenBytes)\n\t\tif err != nil || n != 8 {\n\t\t\tbreak\n\t\t}\n\t\tbytesBuffer := 
bytes.NewBuffer(dataLenBytes)\n\t\tbinary.Read(bytesBuffer, binary.LittleEndian, &dataLen)\n\t\t// fmt.Println(\"data len =\", dataLen)\n\n\t\t// user type\n\t\tn, err = io.ReadFull(file, userTypeBytes)\n\t\tif err != nil || n != 1 {\n\t\t\tbreak\n\t\t}\n\t\t// fmt.Println(\"user type =\", userTypeBytes[0])\n\n\t\t// data\n\t\tfile.Seek(-1, 1) // 回退一个字节,用户类型\n\t\tdataBytes := make([]byte, dataLen)\n\t\tn, err = io.ReadFull(file, dataBytes)\n\t\tif err != nil || int64(n) != dataLen {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch model.UserType(userTypeBytes[0]) {\n\t\tcase model.TypeTeacher:\n\t\t\ts := &model.Teacher{}\n\t\t\tif err := s.UnSerialize(dataBytes); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// s.DisplayInfo()\n\t\t\tm.allUserInfo[model.AllUserType[0]][s.ID] = s\n\t\tcase model.TypeStudent:\n\t\t\tt := &model.Student{}\n\t\t\tif err := t.UnSerialize(dataBytes); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// t.DisplayInfo()\n\t\t\tm.allUserInfo[model.AllUserType[1]][t.ID] = t\n\t\t}\n\t}\n\t// fmt.Println(\"file size =\", fileSize, \",read size =\", n)\n\n\treturn nil\n}", "func (dao *baseDAO) ReadList() {}", "func (dao *baseDAO) ReadOne(id int32) {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultStrictUpdateHealthMenstruationPersonalInfo clears first level 1:many children and then executes a gorm update call
func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) { if in == nil { return nil, fmt.Errorf("Nil argument to DefaultStrictUpdateHealthMenstruationPersonalInfo") } ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } lockedRow := &HealthMenstruationPersonalInfoORM{} db.Model(&ormObj).Set("gorm:query_option", "FOR UPDATE").Where("id=?", ormObj.Id).First(lockedRow) if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok { if db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil { return nil, err } } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok { if db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil { return nil, err } } if err = db.Save(&ormObj).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok { if err = hook.AfterStrictUpdateSave(ctx, db); err != nil { return nil, err } } pbResponse, err := ormObj.ToPB(ctx) if err != nil { return nil, err } return &pbResponse, err }
[ "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" 
{\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func (duo *DoctorinfoUpdateOne) ClearEducationlevel() *DoctorinfoUpdateOne {\n\tduo.mutation.ClearEducationlevel()\n\treturn duo\n}", "func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, 
&HealthMenstruationPersonalInfoORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationPersonalInfoORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func (du *DoctorinfoUpdate) ClearEducationlevel() *DoctorinfoUpdate {\n\tdu.mutation.ClearEducationlevel()\n\treturn du\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != 
nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func (puo *ProductUpdateOne) ClearPersonal() *ProductUpdateOne {\n\tpuo.mutation.ClearPersonal()\n\treturn puo\n}", "func (m *HealthMenstruationPersonalInfo) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationPersonalInfoValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationPersonalInfoValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\t// no validation rules for PeriodLengthInDays\n\n\t// no validation rules 
for CycleLengthInDays\n\n\treturn nil\n}", "func (pu *ProductUpdate) ClearPersonal() *ProductUpdate {\n\tpu.mutation.ClearPersonal()\n\treturn pu\n}", "func (client ModelClient) UpdateHierarchicalEntityResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := 
interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func (client ModelClient) UpdateHierarchicalEntityChildResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (uuo *UserUpdateOne) ClearChildren() *UserUpdateOne {\n\tuuo.mutation.ClearChildren()\n\treturn uuo\n}", "func (client ModelClient) UpdateHierarchicalEntityChildSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}", "func (hs *HealthStatusInfo) UpdateHealthInfo(details bool, registeredESS uint32, storedObjects uint32) {\n\ths.lock()\n\tdefer hs.unLock()\n\n\tHealthUsageInfo.RegisteredESS = registeredESS\n\tHealthUsageInfo.StoredObjects = storedObjects\n\n\tDBHealth.DBStatus = Green\n\ttimeSinceLastError := uint64(0)\n\tif DBHealth.DBReadFailures != 0 || DBHealth.DBWriteFailures != 0 {\n\t\ttimeSinceLastError = uint64(time.Since(DBHealth.lastReadWriteErrorTime).Seconds())\n\t\tDBHealth.TimeSinceLastReadWriteError = timeSinceLastError\n\t}\n\tif DBHealth.DisconnectedFromDB {\n\t\tDBHealth.DBStatus = Red\n\t} else if DBHealth.DBReadFailures != 0 || DBHealth.DBWriteFailures != 0 {\n\t\tif timeSinceLastError < uint64(Configuration.ResendInterval*12) {\n\t\t\tDBHealth.DBStatus = Red\n\t\t} else if timeSinceLastError < uint64(Configuration.ResendInterval*60) {\n\t\t\tDBHealth.DBStatus = Yellow\n\t\t}\n\t}\n\n\tMQTTHealth.MQTTConnectionStatus = Green\n\tif Configuration.CommunicationProtocol != HTTPProtocol 
{\n\t\ttimeSinceLastSubError := uint64(0)\n\t\tif MQTTHealth.SubscribeFailures != 0 {\n\t\t\ttimeSinceLastSubError = uint64(time.Since(MQTTHealth.lastSubscribeErrorTime).Seconds())\n\t\t\tMQTTHealth.TimeSinceLastSubscribeError = timeSinceLastSubError\n\t\t}\n\t\ttimeSinceLastPubError := uint64(0)\n\t\tif MQTTHealth.PublishFailures != 0 {\n\t\t\ttimeSinceLastPubError = uint64(time.Since(MQTTHealth.lastPublishErrorTime).Seconds())\n\t\t\tMQTTHealth.TimeSinceLastPublishError = timeSinceLastPubError\n\t\t}\n\t\tif MQTTHealth.DisconnectedFromMQTTBroker {\n\t\t\tMQTTHealth.MQTTConnectionStatus = Red\n\t\t} else {\n\t\t\tif MQTTHealth.SubscribeFailures != 0 {\n\t\t\t\tif timeSinceLastSubError < uint64(Configuration.ResendInterval*12) {\n\t\t\t\t\tMQTTHealth.MQTTConnectionStatus = Red\n\t\t\t\t} else if timeSinceLastSubError < uint64(Configuration.ResendInterval*60) {\n\t\t\t\t\tMQTTHealth.MQTTConnectionStatus = Yellow\n\t\t\t\t}\n\t\t\t}\n\t\t\tif MQTTHealth.PublishFailures != 0 && MQTTHealth.MQTTConnectionStatus == Green &&\n\t\t\t\ttimeSinceLastPubError < uint64(Configuration.ResendInterval*12) {\n\t\t\t\tMQTTHealth.MQTTConnectionStatus = Yellow\n\t\t\t}\n\t\t}\n\t}\n\n\tif DBHealth.DBStatus == Red || MQTTHealth.MQTTConnectionStatus == Red {\n\t\ths.HealthStatus = Red\n\t} else if DBHealth.DBStatus == Yellow || MQTTHealth.MQTTConnectionStatus == Yellow {\n\t\ths.HealthStatus = Yellow\n\t} else {\n\t\ths.HealthStatus = Green\n\t}\n\n\ths.UpTime = uint64(time.Since(hs.startTime).Seconds())\n\n\tif !details {\n\t\treturn\n\t}\n\n\tif Configuration.CommunicationProtocol != HTTPProtocol {\n\t\tMQTTHealth.LastDisconnectFromBrokerDuration = hs.GetLastDisconnectFromBrokerDuration()\n\t}\n\tDBHealth.LastDisconnectFromDBDuration = hs.GetLastDisconnectFromDBDuration()\n}", "func (db *DataBase) UpdatePlayerPersonalInfo(userID int32, user *models.UserPrivateInfo) (err error) {\n\tvar (\n\t\tconfirmedUser *models.UserPrivateInfo\n\t\ttx *sql.Tx\n\t)\n\n\tif tx, err = db.Db.Begin(); 
err != nil {\n\t\treturn\n\t}\n\tdefer tx.Rollback()\n\n\tif confirmedUser, err = db.getPrivateInfo(tx, userID); err != nil {\n\t\treturn\n\t}\n\n\tconfirmedUser.Update(user)\n\n\tif err = db.updatePlayerPersonalInfo(tx, user); err != nil {\n\t\treturn\n\t}\n\n\terr = tx.Commit()\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultPatchHealthMenstruationPersonalInfo executes a basic gorm update call with patch behavior
func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) { if in == nil { return nil, errors1.NilArgumentError } var pbObj HealthMenstruationPersonalInfo var err error if hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok { if db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil { return nil, err } } pbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db) if err != nil { return nil, err } pbObj = *pbReadRes if hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok { if db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil { return nil, err } } if _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, "", db); err != nil { return nil, err } if hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok { if db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil { return nil, err } } pbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db) if err != nil { return nil, err } if hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok { if err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil { return nil, err } } return pbResponse, nil }
[ "func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationPersonalInfo\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationPersonalInfoORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = 
append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := 
interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationPersonalInfoORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationPersonalInfoORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok 
:= interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func (t *SimpleChaincode) change_hospital(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\n\t/*\n\t Our model looks like\n\t -------------------------------------------------------\n\t 0 1\n\t \"bloodTestID\", \"Hospital\"\n\t -------------------------------------------------------\n\t*/\n\thospital := args[1]\n\tfmt.Println(\"it might actually work now\")\n\tbloodTestList, err := stub.GetState(bloodTestIndex)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get intList\")\n\t}\n\tvar bloodInd []string\n\n\terr = json.Unmarshal(bloodTestList, &bloodInd)\n\tif err != nil {\n\t\tfmt.Println(\"you dun goofed\")\n\t}\n\tres := bloodTest{}\n\tvar bloodAsBytes []byte\n\tfor i := range bloodInd {\n\t\tbloodAsBytes, err = stub.GetState(bloodInd[i])\n\t\tjson.Unmarshal(bloodAsBytes, &res)\n\t\tif res.BloodTestID == args[0] {\n\t\t\tfmt.Println(\"found it\")\n\t\t\tres.Hospital = hospital\n\t\t\tt := time.Now()\n\t\t\tt.Format(\"20060102150405\")\n\t\t\tres.TimeStampHospital = t.String()\n\t\t\tjsonAsBytes, _ := json.Marshal(res)\n\t\t\terr = stub.PutState(args[0], jsonAsBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (t *MedChain) updateHospital(stub shim.ChaincodeStubInterface, args 
[]string) peer.Response {\n\t\t// ==== Input sanitation ====\n\t\tfmt.Println(\"- start updateHospital\")\n\n\t\t// check if all the args are send\n\t\tif len(args) != 4 {\n\t\t\treturn shim.Error(\"Incorrect number of arguments, Required 4 arguments\")\n\t\t}\n\n\t\t// check if the args are empty\n\t\tfor i := 0; i < len(args); i++ {\n\t\t\tif len(args[i]) <= 0 {\n\t\t\t\treturn shim.Error(\"argument \"+ string(i+1) + \" must be a non-empty string\")\n\t\t\t}\n\t\t}\n\n\t\tgetAssetAsBytes, errT := stub.GetState(args[0])\n\n\t\tif errT != nil {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Error : Cannot find Hospital %s\" , errT))\n\t\t}\n\n\t\tif getAssetAsBytes == nil {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Cannot find asset Hospital with ID %s\" , args[0]))\n\t\t}\n\n\t\tvar obj = Hospital{}\n\n\t\tjson.Unmarshal(getAssetAsBytes, &obj)\n\t\tobj.HospitalName = args[1]\n\t\tobj.HospitalAddress = args[2]\n\t\tobj.HospitalPhone = args[3]\n\t\tcomAssetAsBytes, errMarshal := json.Marshal(obj)\n\n\t\tif errMarshal != nil {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Marshal Error: %s\", errMarshal))\n\t\t}\n\n\t\terrPut := stub.PutState(obj.Hospital_ID, comAssetAsBytes)\n\n\t\tif errPut != nil {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Failed to update Hospital with ID %s\", args[0]))\n\t\t}\n\n\t\tfmt.Println(\"Hospital asset with ID %s was updated \\n %v\", args[0], obj)\n\n\t\treturn shim.Success(comAssetAsBytes)\n\t}", "func (m *UpdateHealthMenstruationPersonalInfoRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPayload()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationPersonalInfoRequestValidationError{\n\t\t\t\tfield: \"Payload\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (sp *serviceProvider) Modify(conn orm.Connection, uid int32, name, mobile, email *string) error 
{\n\tstaff := &Staff{}\n\n\tdb := conn.(*gorm.DB).Exec(\"USE staff\")\n\n\treturn db.Model(staff).Where(\"id = ?\", uid).Updates(map[string]interface{}{\n\t\t\"name\": *name,\n\t\t\"mobile\": *mobile,\n\t\t\"email\": *email,\n\t}).Limit(1).Error\n}", "func DefaultPatchUserInfo(ctx context.Context, in *UserInfo, updateMask *field_mask.FieldMask, db *gorm.DB) (*UserInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar pbObj UserInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(UserInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadUserInfo(ctx, &UserInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(UserInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskUserInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(UserInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateUserInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(UserInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func (t *HeathCare_Chaincode) modifyMedicalData(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tfmt.Println(\"\\n=============== start modifyMedicalData function ===============\")\n\tstart := time.Now()\n\ttime.Sleep(time.Second)\n\n\tvar jsonResp string\n\n\tif len(args) != 8 {\n\t\treturn shim.Error(\"expecting 4 
argument\")\n\t}\n\n\t//define identity of query-er and new value of medical record\n\tuserid := args[0]\n\tpatientid := args[1]\n\tlocation := args[2]\n\tcollection := args[3]\n\n\tnewPersonalIdentificationInformation := args[4]\n\tnewMedicalHistory := args[5]\n\tnewFamilyMedicalHistory := args[6]\n\tnewMedicationHistory := args[7]\n\tnewTreatmentHistory := args[8]\n\tnewMedicalDirectives := args[9]\n\ttimeQuery := time.Now().String()\n\n\t//get user identity before query\n\tuserIdentityAsBytes, errUserIdentityAsByte := stub.GetPrivateData(collection, userid)\n\tif errUserIdentityAsByte != nil {\n\t\treturn shim.Error(\"cannot get user identity\")\n\t} else if userIdentityAsBytes == nil {\n\t\treturn shim.Error(\"user does not exist\")\n\t}\n\n\t//create query object with purpose: modify\n\tobjectType := \"Query\"\n\tquery := &Query{objectType, userid, patientid, location, timeQuery, \"modify\"}\n\tqueryAsByte, errQueryAsByte := json.Marshal(query)\n\tif errQueryAsByte != nil {\n\t\treturn shim.Error(errQueryAsByte.Error())\n\t}\n\n\t//save to database\n\terrQueryAsByte = stub.PutPrivateData(\"modifyCollection\", userid, queryAsByte)\n\tif errQueryAsByte != nil {\n\t\treturn shim.Error(errQueryAsByte.Error())\n\t}\n\n\t//create index key\n\tindexName := \"userid~patientid\"\n\tqueryIndexKey, errQueryIndexKey := stub.CreateCompositeKey(indexName, []string{query.UserID, query.PatientID, query.Location, query.Purpose})\n\tif errQueryIndexKey != nil {\n\t\treturn shim.Error(errQueryIndexKey.Error())\n\t}\n\n\t//save index\n\tvalue := []byte{0x00}\n\tstub.PutPrivateData(\"modifyCollection\", queryIndexKey, value)\n\n\t//get medical record data\n\tmedicalRecordAsBytes, errMedicalRecordAsByte := stub.GetPrivateData(\"MedicalRecordCollection\", patientid)\n\tif errMedicalRecordAsByte != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + patientid + \": \" + errMedicalRecordAsByte.Error() + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t} else if 
errMedicalRecordAsByte == nil {\n\t\treturn shim.Error(\"patient's data does not exist\")\n\t}\n\n\t//convert data of patient to json\n\tmedicalRecord := &MedicalRecord{}\n\terrMedicalRecordAsByte = json.Unmarshal(medicalRecordAsBytes, medicalRecord)\n\n\t//change data\n\tmedicalRecord.PersonalIdentificationInformation = newPersonalIdentificationInformation\n\tmedicalRecord.MedicalHistory = newMedicalHistory\n\tmedicalRecord.FamilyMedicalHistory = newFamilyMedicalHistory\n\tmedicalRecord.MedicationHistory = newMedicationHistory\n\tmedicalRecord.TreatmentHistory = newTreatmentHistory\n\tmedicalRecord.MedicalDirectives = newMedicalDirectives\n\n\t//convert new medical record data to byte\n\tnewMedicalRecordAsByte, errNewMedicalRecordAsByte := json.Marshal(medicalRecord)\n\n\t//store new data\n\terrNewMedicalRecordAsByte = stub.PutPrivateData(\"MedicalRecordCollection\", patientid, newMedicalRecordAsByte)\n\tif errNewMedicalRecordAsByte != nil {\n\t\treturn shim.Error(\"cannot save new medical record's data\")\n\t}\n\n\tend := time.Now()\n\telapsed := time.Since(start)\n\tfmt.Println(\"function modifyMedicalData\")\n\tfmt.Println(\"time start: \", start.String())\n\tfmt.Println(\"time end: \", end.String())\n\tfmt.Println(\"time execute: \", elapsed.String())\n\tfmt.Println(\"=============== end modifyMedicalData function ===============\")\n\n\treturn shim.Success(nil)\n}", "func (m *UpdateHealthMenstruationPersonalInfoResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationPersonalInfoResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func ModifyPerson(w http.ResponseWriter, r *http.Request) {\n\tdecoder := json.NewDecoder(r.Body)\n\tvar p Person\n\terr := 
decoder.Decode(&p)\n\tcheck(\"Json Decorder Failure\", err)\n\tparams := mux.Vars(r)\n\titem := params[\"id\"]\n\tui, err := strconv.Atoi(item)\n\tcheck(\"String Conversion Failure\", err)\n\n\tcp := KeyStore[ui]\n\tif len(p.FirstName) > 0 {\n\t\tcp.FirstName = p.FirstName\n\t}\n\tif len(p.LastName) > 0 {\n\t\tcp.LastName = p.LastName\n\t}\n\tif len(p.EmailAddr) > 0 {\n\t\tcp.EmailAddr = p.EmailAddr\n\t}\n\tif len(p.PhoneNumb) > 0 {\n\t\tcp.PhoneNumb = p.PhoneNumb\n\t}\n\tnp := Person{cp.UniqID, cp.FirstName, cp.LastName, cp.EmailAddr, cp.PhoneNumb}\n\tKeyStore[ui] = np\n\tsaveDatabase()\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func (service *EmployeeService) 
PatchEmployeeDetails(employeeID string, employeeDetails models.Employee) error {\n\tcollection := service.mongoClient.Database(DbName).Collection(CollectionName)\n\tupdatesToBePerformed := bson.M{}\n\tupdatesToBePerformed[\"employeeid\"] = employeeID\n\tif employeeDetails.Department != nil {\n\t\tupdatesToBePerformed[\"department\"] = employeeDetails.Department\n\t}\n\n\tif employeeDetails.Name != nil {\n\t\tupdatesToBePerformed[\"name\"] = employeeDetails.Name\n\t}\n\n\tif employeeDetails.Skills != nil {\n\t\tupdatesToBePerformed[\"skills\"] = employeeDetails.Skills\n\t}\n\n\tif employeeDetails.Address != nil {\n\t\taddress := models.Address{}\n\t\tif employeeDetails.Address.City != nil {\n\t\t\taddress.City = employeeDetails.Address.City\n\t\t}\n\n\t\tif employeeDetails.Address.Country != nil {\n\t\t\taddress.Country = employeeDetails.Address.Country\n\t\t}\n\n\t\tif employeeDetails.Address.DoorNo != nil {\n\t\t\taddress.DoorNo = employeeDetails.Address.DoorNo\n\t\t}\n\n\t\tif employeeDetails.Address.State != nil {\n\t\t\taddress.State = employeeDetails.Address.State\n\t\t}\n\n\t\tupdatesToBePerformed[\"address\"] = address\n\t}\n\n\tif employeeDetails.Status != nil {\n\t\tupdatesToBePerformed[\"status\"] = employeeDetails.Status\n\t}\n\n\t// consolidatedMap(&updatesToBePerformed, employeeDetails)\n\n\tresult, err := collection.UpdateOne(\n\t\tcontext.Background(),\n\t\tbson.M{\"employeeid\": employeeID},\n\t\tbson.M{\n\t\t\t\"$set\": updatesToBePerformed,\n\t\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfmt.Println(result)\n\n\treturn nil\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := 
range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func (m *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilder) Patch(ctx context.Context, body i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, requestConfiguration *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilderPatchRequestConfiguration)(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, 
i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.CreateDeviceHealthStatusFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable), nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultPatchSetHealthMenstruationPersonalInfo executes a bulk gorm update call with patch behavior
func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) { if len(objects) != len(updateMasks) { return nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects)) } results := make([]*HealthMenstruationPersonalInfo, 0, len(objects)) for i, patcher := range objects { pbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db) if err != nil { return nil, err } results = append(results, pbResponse) } return results, nil }
[ "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationPersonalInfo\")\n\t}\n\tormObj, err := 
in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationPersonalInfoORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = 
patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) 
(*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationPersonalInfoORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationPersonalInfoORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := 
ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, 
fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func (s *Service) PrivacyBatchModify(c context.Context, mid int64, data map[string]int) (err error) {\n\tgroup, errCtx := errgroup.WithContext(c)\n\tfor k, v := range data {\n\t\tfield := k\n\t\tvalue := v\n\t\tgroup.Go(func() error {\n\t\t\tif e := s.PrivacyModify(errCtx, mid, field, value); e != nil {\n\t\t\t\tlog.Warn(\"PrivacyBatchModify mid(%d) filed(%s) value(%d) error(%v)\", mid, field, value, e)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tgroup.Wait()\n\treturn\n}", "func (m *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilder) Patch(ctx context.Context, body i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, requestConfiguration *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilderPatchRequestConfiguration)(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.CreateDeviceHealthStatusFromDiscriminatorValue, errorMapping)\n if err != nil 
{\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable), nil\n}", "func (t *HeathCare_Chaincode) modifyMedicalData(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tfmt.Println(\"\\n=============== start modifyMedicalData function ===============\")\n\tstart := time.Now()\n\ttime.Sleep(time.Second)\n\n\tvar jsonResp string\n\n\tif len(args) != 8 {\n\t\treturn shim.Error(\"expecting 4 argument\")\n\t}\n\n\t//define identity of query-er and new value of medical record\n\tuserid := args[0]\n\tpatientid := args[1]\n\tlocation := args[2]\n\tcollection := args[3]\n\n\tnewPersonalIdentificationInformation := args[4]\n\tnewMedicalHistory := args[5]\n\tnewFamilyMedicalHistory := args[6]\n\tnewMedicationHistory := args[7]\n\tnewTreatmentHistory := args[8]\n\tnewMedicalDirectives := args[9]\n\ttimeQuery := time.Now().String()\n\n\t//get user identity before query\n\tuserIdentityAsBytes, errUserIdentityAsByte := stub.GetPrivateData(collection, userid)\n\tif errUserIdentityAsByte != nil {\n\t\treturn shim.Error(\"cannot get user identity\")\n\t} else if userIdentityAsBytes == nil {\n\t\treturn shim.Error(\"user does not exist\")\n\t}\n\n\t//create query object with purpose: modify\n\tobjectType := \"Query\"\n\tquery := &Query{objectType, userid, patientid, location, timeQuery, \"modify\"}\n\tqueryAsByte, errQueryAsByte := json.Marshal(query)\n\tif errQueryAsByte != nil {\n\t\treturn shim.Error(errQueryAsByte.Error())\n\t}\n\n\t//save to database\n\terrQueryAsByte = stub.PutPrivateData(\"modifyCollection\", userid, queryAsByte)\n\tif errQueryAsByte != nil {\n\t\treturn shim.Error(errQueryAsByte.Error())\n\t}\n\n\t//create index key\n\tindexName := \"userid~patientid\"\n\tqueryIndexKey, errQueryIndexKey := stub.CreateCompositeKey(indexName, []string{query.UserID, query.PatientID, query.Location, query.Purpose})\n\tif errQueryIndexKey != nil {\n\t\treturn 
shim.Error(errQueryIndexKey.Error())\n\t}\n\n\t//save index\n\tvalue := []byte{0x00}\n\tstub.PutPrivateData(\"modifyCollection\", queryIndexKey, value)\n\n\t//get medical record data\n\tmedicalRecordAsBytes, errMedicalRecordAsByte := stub.GetPrivateData(\"MedicalRecordCollection\", patientid)\n\tif errMedicalRecordAsByte != nil {\n\t\tjsonResp = \"{\\\"Error\\\":\\\"Failed to get state for \" + patientid + \": \" + errMedicalRecordAsByte.Error() + \"\\\"}\"\n\t\treturn shim.Error(jsonResp)\n\t} else if errMedicalRecordAsByte == nil {\n\t\treturn shim.Error(\"patient's data does not exist\")\n\t}\n\n\t//convert data of patient to json\n\tmedicalRecord := &MedicalRecord{}\n\terrMedicalRecordAsByte = json.Unmarshal(medicalRecordAsBytes, medicalRecord)\n\n\t//change data\n\tmedicalRecord.PersonalIdentificationInformation = newPersonalIdentificationInformation\n\tmedicalRecord.MedicalHistory = newMedicalHistory\n\tmedicalRecord.FamilyMedicalHistory = newFamilyMedicalHistory\n\tmedicalRecord.MedicationHistory = newMedicationHistory\n\tmedicalRecord.TreatmentHistory = newTreatmentHistory\n\tmedicalRecord.MedicalDirectives = newMedicalDirectives\n\n\t//convert new medical record data to byte\n\tnewMedicalRecordAsByte, errNewMedicalRecordAsByte := json.Marshal(medicalRecord)\n\n\t//store new data\n\terrNewMedicalRecordAsByte = stub.PutPrivateData(\"MedicalRecordCollection\", patientid, newMedicalRecordAsByte)\n\tif errNewMedicalRecordAsByte != nil {\n\t\treturn shim.Error(\"cannot save new medical record's data\")\n\t}\n\n\tend := time.Now()\n\telapsed := time.Since(start)\n\tfmt.Println(\"function modifyMedicalData\")\n\tfmt.Println(\"time start: \", start.String())\n\tfmt.Println(\"time end: \", end.String())\n\tfmt.Println(\"time execute: \", elapsed.String())\n\tfmt.Println(\"=============== end modifyMedicalData function ===============\")\n\n\treturn shim.Success(nil)\n}", "func (m *UpdateHealthMenstruationPersonalInfoRequest) Validate() error {\n\tif m == nil 
{\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPayload()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationPersonalInfoRequestValidationError{\n\t\t\t\tfield: \"Payload\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *SimpleChaincode) change_hospital(stub shim.ChaincodeStubInterface, args []string) ([]byte, error) {\n\n\t/*\n\t Our model looks like\n\t -------------------------------------------------------\n\t 0 1\n\t \"bloodTestID\", \"Hospital\"\n\t -------------------------------------------------------\n\t*/\n\thospital := args[1]\n\tfmt.Println(\"it might actually work now\")\n\tbloodTestList, err := stub.GetState(bloodTestIndex)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get intList\")\n\t}\n\tvar bloodInd []string\n\n\terr = json.Unmarshal(bloodTestList, &bloodInd)\n\tif err != nil {\n\t\tfmt.Println(\"you dun goofed\")\n\t}\n\tres := bloodTest{}\n\tvar bloodAsBytes []byte\n\tfor i := range bloodInd {\n\t\tbloodAsBytes, err = stub.GetState(bloodInd[i])\n\t\tjson.Unmarshal(bloodAsBytes, &res)\n\t\tif res.BloodTestID == args[0] {\n\t\t\tfmt.Println(\"found it\")\n\t\t\tres.Hospital = hospital\n\t\t\tt := time.Now()\n\t\t\tt.Format(\"20060102150405\")\n\t\t\tres.TimeStampHospital = t.String()\n\t\t\tjsonAsBytes, _ := json.Marshal(res)\n\t\t\terr = stub.PutState(args[0], jsonAsBytes)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, nil\n}", "func (m *UpdateHealthMenstruationPersonalInfoResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationPersonalInfoResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: 
err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func (m *ItemEmployeeExperienceRequestBuilder) Patch(ctx context.Context, body iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.EmployeeExperienceUserable, requestConfiguration *ItemEmployeeExperienceRequestBuilderPatchRequestConfiguration)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.EmployeeExperienceUserable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := 
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateEmployeeExperienceUserFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.EmployeeExperienceUserable), nil\n}", "func (sp *serviceProvider) Modify(conn orm.Connection, uid int32, name, mobile, email *string) error {\n\tstaff := &Staff{}\n\n\tdb := conn.(*gorm.DB).Exec(\"USE staff\")\n\n\treturn db.Model(staff).Where(\"id = ?\", uid).Updates(map[string]interface{}{\n\t\t\"name\": *name,\n\t\t\"mobile\": *mobile,\n\t\t\"email\": *email,\n\t}).Limit(1).Error\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func (service *EmployeeService) PatchEmployeeDetails(employeeID string, employeeDetails models.Employee) error {\n\tcollection := service.mongoClient.Database(DbName).Collection(CollectionName)\n\tupdatesToBePerformed := bson.M{}\n\tupdatesToBePerformed[\"employeeid\"] = employeeID\n\tif employeeDetails.Department != nil {\n\t\tupdatesToBePerformed[\"department\"] = employeeDetails.Department\n\t}\n\n\tif employeeDetails.Name != nil {\n\t\tupdatesToBePerformed[\"name\"] = employeeDetails.Name\n\t}\n\n\tif employeeDetails.Skills != nil {\n\t\tupdatesToBePerformed[\"skills\"] = employeeDetails.Skills\n\t}\n\n\tif employeeDetails.Address != nil {\n\t\taddress := models.Address{}\n\t\tif employeeDetails.Address.City != nil {\n\t\t\taddress.City = employeeDetails.Address.City\n\t\t}\n\n\t\tif employeeDetails.Address.Country != nil {\n\t\t\taddress.Country = employeeDetails.Address.Country\n\t\t}\n\n\t\tif employeeDetails.Address.DoorNo != nil {\n\t\t\taddress.DoorNo = employeeDetails.Address.DoorNo\n\t\t}\n\n\t\tif employeeDetails.Address.State != nil {\n\t\t\taddress.State = employeeDetails.Address.State\n\t\t}\n\n\t\tupdatesToBePerformed[\"address\"] = address\n\t}\n\n\tif employeeDetails.Status != nil {\n\t\tupdatesToBePerformed[\"status\"] = employeeDetails.Status\n\t}\n\n\t// consolidatedMap(&updatesToBePerformed, employeeDetails)\n\n\tresult, err := collection.UpdateOne(\n\t\tcontext.Background(),\n\t\tbson.M{\"employeeid\": employeeID},\n\t\tbson.M{\n\t\t\t\"$set\": updatesToBePerformed,\n\t\t})\n\n\tif err != nil 
{\n\t\tfmt.Println(err)\n\t}\n\n\tfmt.Println(result)\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultApplyFieldMaskHealthMenstruationPersonalInfo patches an pbObject with patcher according to a field mask.
func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) { if patcher == nil { return nil, nil } else if patchee == nil { return nil, errors1.NilArgumentError } var err error for _, f := range updateMask.Paths { if f == prefix+"Id" { patchee.Id = patcher.Id continue } if f == prefix+"CreatedAt" { patchee.CreatedAt = patcher.CreatedAt continue } if f == prefix+"UpdatedAt" { patchee.UpdatedAt = patcher.UpdatedAt continue } if f == prefix+"ProfileId" { patchee.ProfileId = patcher.ProfileId continue } if f == prefix+"PeriodLengthInDays" { patchee.PeriodLengthInDays = patcher.PeriodLengthInDays continue } if f == prefix+"CycleLengthInDays" { patchee.CycleLengthInDays = patcher.CycleLengthInDays continue } } if err != nil { return nil, err } return patchee, nil }
[ "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultApplyFieldMaskUserInfo(ctx context.Context, patchee *UserInfo, patcher *UserInfo, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*UserInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedCreatedAt bool\n\tvar updatedUpdatedAt bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UserId\" {\n\t\t\tpatchee.UserId = patcher.UserId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"LastName\" {\n\t\t\tpatchee.LastName = patcher.LastName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FirstName\" {\n\t\t\tpatchee.FirstName = patcher.FirstName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Period\" {\n\t\t\tpatchee.Period = patcher.Period\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"DepartmentId\" {\n\t\t\tpatchee.DepartmentId = patcher.DepartmentId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"JobId\" {\n\t\t\tpatchee.JobId = patcher.JobId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"EnrollmentFlg\" {\n\t\t\tpatchee.EnrollmentFlg = patcher.EnrollmentFlg\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"AdminFlg\" {\n\t\t\tpatchee.AdminFlg = patcher.AdminFlg\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedCreatedAt && strings.HasPrefix(f, prefix+\"CreatedAt.\") {\n\t\t\tif patcher.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := 
&field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"CreatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.CreatedAt, patchee.CreatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tupdatedCreatedAt = true\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUpdatedAt && strings.HasPrefix(f, prefix+\"UpdatedAt.\") {\n\t\t\tif patcher.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"UpdatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.UpdatedAt, patchee.UpdatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tupdatedUpdatedAt = true\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = 
patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskProfile(ctx context.Context, patchee *Profile, patcher *Profile, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*Profile, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Notes\" {\n\t\t\tpatchee.Notes = patcher.Notes\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FirstName\" {\n\t\t\tpatchee.FirstName = patcher.FirstName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"LastName\" {\n\t\t\tpatchee.LastName = patcher.LastName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PrimaryEmail\" {\n\t\t\tpatchee.PrimaryEmail = 
patcher.PrimaryEmail\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Groups\" {\n\t\t\tpatchee.Groups = patcher.Groups\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfilePictureUrl\" {\n\t\t\tpatchee.ProfilePictureUrl = patcher.ProfilePictureUrl\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchSetUserInfo(ctx context.Context, objects []*UserInfo, updateMasks []*field_mask.FieldMask, db *gorm.DB) ([]*UserInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*UserInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchUserInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultApplyFieldMaskOwner(ctx context.Context, patchee *Owner, patcher *Owner, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Owner, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FirstName\" {\n\t\t\tpatchee.FirstName = patcher.FirstName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"LastName\" {\n\t\t\tpatchee.LastName = patcher.LastName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Email\" {\n\t\t\tpatchee.Email = patcher.Email\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Country\" {\n\t\t\tpatchee.Country = patcher.Country\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskLogActivity(ctx context.Context, patchee *LogActivity, patcher *LogActivity, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) 
(*LogActivity, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Ip\" {\n\t\t\tpatchee.Ip = patcher.Ip\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskSettings(ctx context.Context, patchee *Settings, patcher *Settings, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Settings, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedPaymentDetails bool\n\tvar updatedReturnPolicy bool\n\tvar updatedShippingPolicy bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedPaymentDetails && strings.HasPrefix(f, prefix+\"PaymentDetails.\") {\n\t\t\tif patcher.PaymentDetails == nil {\n\t\t\t\tpatchee.PaymentDetails = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.PaymentDetails == nil {\n\t\t\t\tpatchee.PaymentDetails = &Settings_PaymentDetails{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"PaymentDetails.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.PaymentDetails, patchee.PaymentDetails, 
childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"PaymentDetails\" {\n\t\t\tupdatedPaymentDetails = true\n\t\t\tpatchee.PaymentDetails = patcher.PaymentDetails\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ShopPolicy\" {\n\t\t\tpatchee.ShopPolicy = patcher.ShopPolicy\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PrivacyPolicy\" {\n\t\t\tpatchee.PrivacyPolicy = patcher.PrivacyPolicy\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedReturnPolicy && strings.HasPrefix(f, prefix+\"ReturnPolicy.\") {\n\t\t\tif patcher.ReturnPolicy == nil {\n\t\t\t\tpatchee.ReturnPolicy = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.ReturnPolicy == nil {\n\t\t\t\tpatchee.ReturnPolicy = &ReturnPolicy{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"ReturnPolicy.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.ReturnPolicy, patchee.ReturnPolicy, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"ReturnPolicy\" {\n\t\t\tupdatedReturnPolicy = true\n\t\t\tpatchee.ReturnPolicy = patcher.ReturnPolicy\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedShippingPolicy && strings.HasPrefix(f, prefix+\"ShippingPolicy.\") {\n\t\t\tif patcher.ShippingPolicy == nil {\n\t\t\t\tpatchee.ShippingPolicy = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.ShippingPolicy == nil {\n\t\t\t\tpatchee.ShippingPolicy = &ShippingPolicy{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"ShippingPolicy.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.ShippingPolicy, 
patchee.ShippingPolicy, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"ShippingPolicy\" {\n\t\t\tupdatedShippingPolicy = true\n\t\t\tpatchee.ShippingPolicy = patcher.ShippingPolicy\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskMultiaccountTypeWithoutID(ctx context.Context, patchee *MultiaccountTypeWithoutID, patcher *MultiaccountTypeWithoutID, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*MultiaccountTypeWithoutID, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"SomeField\" {\n\t\t\tpatchee.SomeField = patcher.SomeField\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func WithOverrideFieldMask(d *presenceInterceptorOptionsDecorator) {\n\td.overrideFieldMask = true\n}", "func DefaultApplyFieldMaskAddress(ctx context.Context, patchee *Address, patcher *Address, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Address, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Address\" {\n\t\t\tpatchee.Address = patcher.Address\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Unit\" {\n\t\t\tpatchee.Unit = patcher.Unit\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ZipCode\" {\n\t\t\tpatchee.ZipCode = patcher.ZipCode\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"City\" {\n\t\t\tpatchee.City = patcher.City\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"State\" {\n\t\t\tpatchee.State = patcher.State\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Longitude\" {\n\t\t\tpatchee.Longitude = 
patcher.Longitude\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Lattitude\" {\n\t\t\tpatchee.Lattitude = patcher.Lattitude\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskMultiaccountTypeWithID(ctx context.Context, patchee *MultiaccountTypeWithID, patcher *MultiaccountTypeWithID, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*MultiaccountTypeWithID, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"SomeField\" {\n\t\t\tpatchee.SomeField = patcher.SomeField\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchUserInfo(ctx context.Context, in *UserInfo, updateMask *field_mask.FieldMask, db *gorm.DB) (*UserInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar pbObj UserInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(UserInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadUserInfo(ctx, &UserInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(UserInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskUserInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(UserInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, 
err := DefaultStrictUpdateUserInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(UserInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultApplyFieldMaskMerchantAccount(ctx context.Context, patchee *MerchantAccount, patcher *MerchantAccount, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*MerchantAccount, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedAddress bool\n\tvar updatedShopSettings bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Owners\" {\n\t\t\tpatchee.Owners = patcher.Owners\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BusinessName\" {\n\t\t\tpatchee.BusinessName = patcher.BusinessName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BusinessEmail\" {\n\t\t\tpatchee.BusinessEmail = patcher.BusinessEmail\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"EmployerId\" {\n\t\t\tpatchee.EmployerId = patcher.EmployerId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"EstimateAnnualRevenue\" {\n\t\t\tpatchee.EstimateAnnualRevenue = patcher.EstimateAnnualRevenue\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedAddress && strings.HasPrefix(f, prefix+\"Address.\") {\n\t\t\tupdatedAddress = true\n\t\t\tif patcher.Address == nil {\n\t\t\t\tpatchee.Address = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Address == nil {\n\t\t\t\tpatchee.Address = &Address{}\n\t\t\t}\n\t\t\tif o, err := DefaultApplyFieldMaskAddress(ctx, patchee.Address, patcher.Address, &field_mask.FieldMask{Paths: updateMask.Paths[i:]}, prefix+\"Address.\", db); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tpatchee.Address = o\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Address\" 
{\n\t\t\tupdatedAddress = true\n\t\t\tpatchee.Address = patcher.Address\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ItemsOrServicesSold\" {\n\t\t\tpatchee.ItemsOrServicesSold = patcher.ItemsOrServicesSold\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FulfillmentOptions\" {\n\t\t\tpatchee.FulfillmentOptions = patcher.FulfillmentOptions\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedShopSettings && strings.HasPrefix(f, prefix+\"ShopSettings.\") {\n\t\t\tupdatedShopSettings = true\n\t\t\tif patcher.ShopSettings == nil {\n\t\t\t\tpatchee.ShopSettings = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.ShopSettings == nil {\n\t\t\t\tpatchee.ShopSettings = &Settings{}\n\t\t\t}\n\t\t\tif o, err := DefaultApplyFieldMaskSettings(ctx, patchee.ShopSettings, patcher.ShopSettings, &field_mask.FieldMask{Paths: updateMask.Paths[i:]}, prefix+\"ShopSettings.\", db); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else {\n\t\t\t\tpatchee.ShopSettings = o\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ShopSettings\" {\n\t\t\tupdatedShopSettings = true\n\t\t\tpatchee.ShopSettings = patcher.ShopSettings\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"SupportedCauses\" {\n\t\t\tpatchee.SupportedCauses = patcher.SupportedCauses\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Bio\" {\n\t\t\tpatchee.Bio = patcher.Bio\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Headline\" {\n\t\t\tpatchee.Headline = patcher.Headline\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PhoneNumber\" {\n\t\t\tpatchee.PhoneNumber = patcher.PhoneNumber\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Tags\" {\n\t\t\tpatchee.Tags = patcher.Tags\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"StripeConnectedAccountId\" {\n\t\t\tpatchee.StripeConnectedAccountId = patcher.StripeConnectedAccountId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"StripeAccountId\" {\n\t\t\tpatchee.StripeAccountId = patcher.StripeAccountId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"AuthnAccountId\" {\n\t\t\tpatchee.AuthnAccountId = 
patcher.AuthnAccountId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"AccountOnboardingDetails\" {\n\t\t\tpatchee.AccountOnboardingDetails = patcher.AccountOnboardingDetails\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"AccountOnboardingState\" {\n\t\t\tpatchee.AccountOnboardingState = patcher.AccountOnboardingState\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"AccountType\" {\n\t\t\tpatchee.AccountType = patcher.AccountType\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IsActive\" {\n\t\t\tpatchee.IsActive = patcher.IsActive\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BusinessType\" {\n\t\t\tpatchee.BusinessType = patcher.BusinessType\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Country\" {\n\t\t\tpatchee.Country = patcher.Country\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"DefaultCurrency\" {\n\t\t\tpatchee.DefaultCurrency = patcher.DefaultCurrency\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskIntPoint(ctx context.Context, patchee *IntPoint, patcher *IntPoint, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*IntPoint, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.New(\"Patchee inputs to DefaultApplyFieldMaskIntPoint must be non-nil\")\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"X\" {\n\t\t\tpatchee.X = patcher.X\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Y\" {\n\t\t\tpatchee.Y = patcher.Y\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, 
fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultApplyFieldMaskTags(ctx context.Context, patchee *Tags, patcher *Tags, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Tags, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"TagName\" {\n\t\t\tpatchee.TagName = patcher.TagName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"TagDescription\" {\n\t\t\tpatchee.TagDescription = patcher.TagDescription\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Metadata\" {\n\t\t\tpatchee.Metadata = patcher.Metadata\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchSetProfile(ctx context.Context, objects []*Profile, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*Profile, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*Profile, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchProfile(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultListHealthMenstruationPersonalInfo executes a gorm list call
func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) { in := HealthMenstruationPersonalInfo{} ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok { if db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil { return nil, err } } db, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs) if err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok { if db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil { return nil, err } } db = db.Where(&ormObj) db = db.Order("id") ormResponse := []HealthMenstruationPersonalInfoORM{} if err := db.Find(&ormResponse).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok { if err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil { return nil, err } } pbResponse := []*HealthMenstruationPersonalInfo{} for _, responseEntry := range ormResponse { temp, err := responseEntry.ToPB(ctx) if err != nil { return nil, err } pbResponse = append(pbResponse, &temp) } return pbResponse, nil }
[ "func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationPersonalInfoORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationPersonalInfoORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, 
f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultListUserInfo(ctx context.Context, db *gorm.DB) ([]*UserInfo, error) {\n\tin := UserInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(UserInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm1.ApplyCollectionOperators(ctx, db, &UserInfoORM{}, &UserInfo{}, nil, nil, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(UserInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []UserInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(UserInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := 
[]*UserInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func (m *ListHealthMenstruationPersonalInfoRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationPersonalInfoRequestValidationError{\n\t\t\t\tfield: \"Filter\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetOrderBy()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationPersonalInfoRequestValidationError{\n\t\t\t\tfield: \"OrderBy\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetFields()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn 
ListHealthMenstruationPersonalInfoRequestValidationError{\n\t\t\t\tfield: \"Fields\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetPaging()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationPersonalInfoRequestValidationError{\n\t\t\t\tfield: \"Paging\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func (m *ListHealthMenstruationPersonalInfoResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tfor idx, item := range m.GetResults() {\n\t\t_, _ = idx, item\n\n\t\tif v, ok := interface{}(item).(interface{ Validate() error }); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn ListHealthMenstruationPersonalInfoResponseValidationError{\n\t\t\t\t\tfield: fmt.Sprintf(\"Results[%v]\", idx),\n\t\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\t\tcause: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil 
{\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func (srv *UsersService) ListHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"ListHandler\")\n\n\tcurrentUser := GetCurrentUser(ctx)\n\n\tlimitQuery := ctx.DefaultQuery(\"limit\", \"10\")\n\tpageQuery := ctx.DefaultQuery(\"page\", \"1\")\n\tparams := ctx.Request.URL.Query()\n\n\tvar adminsRoleIncluded = false\n\n\troles := params[\"filter[role_name]\"]\n\tif len(roles) > 0 {\n\t\tfor key, role := range roles {\n\t\t\t// remove root from role names if user is not root\n\t\t\t// only root can see root 
users\n\t\t\tif role == models.RoleRoot && currentUser.RoleName != models.RoleRoot {\n\t\t\t\tcopy(roles[key:], roles[key+1:])\n\t\t\t\troles[len(roles)-1] = \"\"\n\t\t\t\troles = roles[:len(roles)-1]\n\t\t\t}\n\t\t\tif role == models.RoleRoot || role == models.RoleAdmin {\n\t\t\t\tadminsRoleIncluded = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\tadminsRoleIncluded = true\n\t}\n\n\tvar hasPerm bool\n\tif adminsRoleIncluded {\n\t\thasPerm = srv.PermissionsService.CanViewAdminProfile(currentUser.UID)\n\t} else {\n\t\thasPerm = srv.PermissionsService.CanViewUserProfile(currentUser.UID)\n\t}\n\n\tif !hasPerm {\n\t\tsrv.ResponseService.Forbidden(ctx)\n\t\treturn\n\t}\n\n\tquery := srv.Repository.GetUsersRepository().Filter(params)\n\n\tpagination, err := srv.Repository.GetUsersRepository().Paginate(query, pageQuery, limitQuery, serializers.NewUsers())\n\tif err != nil {\n\t\tlogger.Error(\"сan't load list of user\", \"error\", err)\n\t\t// Returns a \"400 StatusBadRequest\" response\n\t\tsrv.ResponseService.Error(ctx, responses.CannotRetrieveCollection, \"Can't load list of users\")\n\t\treturn\n\t}\n\n\t// Returns a \"200 OK\" response\n\tsrv.ResponseService.OkResponse(ctx, pagination)\n}", "func (client PermissionsClient) ListByBillingProfileResponder(resp *http.Response) (result PermissionsListResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (hh *HealthCheckHandler) List(w http.ResponseWriter, r *http.Request) {\n\tqueryParams := r.URL.Query()\n\tpage, err := strconv.Atoi(queryParams[\"page\"][0])\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlist := hh.db.List()\n\tsort.Sort(models.HealthChecks(list))\n\tstart, end := paginate(page, 10, len(list))\n\tpaginated := list[start:end]\n\n\tres := 
&models.HealthCheckList{\n\t\tItems: paginated,\n\t\tTotal: len(list),\n\t\tPage: page,\n\t\tSize: 10,\n\t}\n\n\tb, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(b)\n}", "func HlistAllUsers(httpwriter http.ResponseWriter, req *http.Request) {\n\n\tdefer req.Body.Close()\n\tbodybyte, _ := ioutil.ReadAll(req.Body)\n\n\ttype dcUserDetails struct {\n\t\tEmail string // User ID/ Email\n\t\tIsAdmin string //\n\t\tApplicationID string //\n\t\tStatus string //\n\t\tUserType string //\n\t}\n\n\tvar objtoaction dcUserDetails\n\terr = json.Unmarshal(bodybyte, &objtoaction)\n\n\tcredentials := security.Credentials{}\n\tcredentials.UserID = strings.ToUpper(objtoaction.Email)\n\n\tusercredentials, resfind := security.UsersGetAll()\n\tif resfind == \"200 OK\" {\n\t\t// All good\n\t} else {\n\t\t// usercredentials.ApplicationID = \"User Not found\"\n\t\treturn\n\t}\n\n\tjson.NewEncoder(httpwriter).Encode(&usercredentials)\n\treturn\n\n}", "func (sp *serviceProvider) List(conn orm.Connection) ([]Staff, error) {\n\tlist := []Staff{}\n\n\tdb := conn.(*gorm.DB).Exec(\"USE staff\")\n\terr := db.Model(list).Find(&list).Error\n\n\tif err != nil {\n\t\treturn list, err\n\t}\n\n\treturn list, nil\n}", "func (h *Handlers) ListHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\tfirstnameInput := strings.Trim(r.FormValue(\"firstname\"), \" \")\n\t\tlastnameInput := strings.Trim(r.FormValue(\"lastname\"), \" \")\n\n\t\tif h.ValidInput.MatchString(firstnameInput) && h.ValidInput.MatchString(lastnameInput) {\n\t\t\tperson := entity.Person{\n\t\t\t\tFirstname: firstnameInput,\n\t\t\t\tLastname: lastnameInput,\n\t\t\t}\n\t\t\th.DBClient.InsertPerson(person)\n\t\t}\n\n\t\thttp.Redirect(w, r, \"/\", http.StatusMovedPermanently)\n\t}\n\n\tpersons, err := h.DBClient.GetPersons()\n\tif err != nil 
{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Println(err)\n\t}\n\n\terr = h.ListTemplate.ExecuteTemplate(w, \"layout\", struct{ Persons []entity.Person }{persons})\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Println(err)\n\t}\n}", "func DefaultListProfile(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*Profile, error) {\n\tin := Profile{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &ProfileORM{}, &Profile{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []ProfileORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(ProfileORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*Profile{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func (s *service) List(ctx context.Context, req *basiccrud.ListRequest) (*basiccrud.ListResponse, error) {\n\tres := &basiccrud.ListResponse{\n\t\tFullnames: s.Fullname,\n\t}\n\treturn res, nil\n}", "func (a *App) GetAllFamousPersons(w http.ResponseWriter, r *http.Request) {\n handler.GetAllFamousPersons(a.DB, w, 
r)\n}", "func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func ListPersons(c *gin.Context) {\n\tvar persons []models.Person\n\tif err := models.DB.Find(&persons).Error; err != nil {\n\t\tc.JSON(http.StatusInternalServerError, ListPersonsResponse{Error: err.Error()})\n\t}\n\toutputPersons := make([]Person, 0, len(persons))\n\tfor _, p := range persons {\n\t\toutputPersons = append(outputPersons, personFromModel(&p))\n\t}\n\n\tc.JSON(http.StatusOK, ListPersonsResponse{Data: outputPersons})\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultCreateHealthMenstruationDailyEntry executes a basic gorm create call
func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) { if in == nil { return nil, errors1.NilArgumentError } ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok { if db, err = hook.BeforeCreate_(ctx, db); err != nil { return nil, err } } if err = db.Create(&ormObj).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok { if err = hook.AfterCreate_(ctx, db); err != nil { return nil, err } } pbResponse, err := ormObj.ToPB(ctx) return &pbResponse, err }
[ "func DefaultCreateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationDailyEntryORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok {\n\t\tif err = 
hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, 
errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := 
interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func (m *HealthMenstruationDailyEntry) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\tif v, ok := interface{}(m.GetDay()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"Day\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for IntensityPercentage\n\n\t// no validation rules for 
Type\n\n\t// no validation rules for Manual\n\n\t// no validation rules for BasedOnPrediction\n\n\treturn nil\n}", "func NewCreateMailerEntryDefault(code int) *CreateMailerEntryDefault {\n\tif code <= 0 {\n\t\tcode = 500\n\t}\n\n\treturn &CreateMailerEntryDefault{\n\t\t_statusCode: code,\n\t}\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func (s *Service) CreateEntry(ctx context.Context, in *pb.CreateEntryRequest) (*pb.Entry, error) {\n\tcurrentUser, err := s.getCurrentUser(ctx)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.PermissionDenied, \"Authentication 
failed\")\n\t}\n\n\tvar year int\n\terr = s.db.Get(&year, \"select year from calendars where id = ?\", in.GetCalendarId())\n\tif err == sql.ErrNoRows {\n\t\treturn nil, status.Errorf(codes.NotFound, \"Calendar not found\")\n\t}\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"Failed query to fetch calendar: %w\", err)\n\t}\n\n\tday := in.GetDay()\n\tif day < 1 || day > 25 {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"Invalid day: %d\", day)\n\t}\n\n\tlastID, err := s.insertEntry(currentUser.ID, in.GetCalendarId(), day)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"Failed to insert entry: %w\", err)\n\t}\n\n\tvar entryID int64\n\terr = s.db.Get(&entryID, \"select id from entries where id = ?\", lastID)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"Failed query to fetch entry: %w\", err)\n\t}\n\n\treturn &pb.Entry{Id: entryID}, nil\n}", "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func (m *CreateHealthMenstruationDailyEntryRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPayload()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn CreateHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"Payload\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: 
err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (dbHandler *Handler) CreateEntry(userID uint, text string, ts time.Time) (api.Entry, error) {\n\tentry := api.Entry{UserID: userID, Text: text}\n\tif !ts.IsZero() {\n\t\tentry.CreatedAt = ts\n\t\tentry.UpdatedAt = ts\n\t}\n\n\tdb := dbHandler.DB.Create(&entry)\n\tif db.Error != nil {\n\t\treturn entry, errors.WrapWithDetails(db.Error, \"cannot create entry\", \"userID\", userID)\n\t}\n\n\treturn entry, nil\n}", "func CreateMeeting(c *gin.Context) {\n // Validate input\n var input CreateMeetingInput\n if err := c.ShouldBindJSON(&input); err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n return\n }\n\n // Create meeting\n meeting := models.Meeting{CreatedBy: input.CreatedBy, Title: input.Title, Description: input.Description, StartDate: input.StartDate, EndDate: input.EndDate, Location: input.Location}\n models.DB.Create(&meeting)\n\n c.JSON(http.StatusOK, gin.H{\"data\": meeting})\n}", "func CreateAppointment(c *gin.Context) {\n // Validate input\n var input CreateAppointmentInput\n if err := c.ShouldBindJSON(&input); err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n return\n }\n\n // Create appointment\n appointment := models.Appointment{Name: input.Name, Designation: input.Designation, Time: input.Time}\n models.DB.Create(&appointment)\n\n c.JSON(http.StatusOK, gin.H{\"data\": appointment})\n}", "func DateNewlyAddCreate() bool {\n\tlogging.Info(\"data newly add data handle starting..., method: \", system.RunFuncName())\n\n\tdateNewlyAdd := GetNewlyAddData()\n\tif err := dsprecorddao.InsertOne(dateNewlyAdd); err != nil {\n\t\tlogging.Info(\"newly create failed :\", err.Error(), \"method: \", system.RunFuncName())\n\t\treturn false\n\t}\n\tlogging.Info(\"data newly add data handle ending... 
method: \", system.RunFuncName())\n\treturn true\n}", "func (m *MeetupsRepo) Create(meetup *models.Meetup) (*models.Meetup, error) {\n _, err := m.DB.Model(meetup).Returning(\"*\").Insert()\n return meetup, err\n}", "func DefaultCreateLogActivity(ctx context.Context, in *LogActivity, db *gorm1.DB) (*LogActivity, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func (m *CreateHealthMenstruationDailyEntryResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn CreateHealthMenstruationDailyEntryResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func CreateSchedule(c *gin.Context) {\n // Validate input\n var input CreateAppointmentInput\n if err := c.ShouldBindJSON(&input); err != nil {\n c.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n return\n }\n\n // Create appointment\n appointment := models.Appointment{Name: input.Name, Designation: input.Designation, Time: input.Time}\n models.DB.Create(&appointment)\n\n c.JSON(http.StatusOK, gin.H{\"data\": appointment})\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultReadHealthMenstruationDailyEntry executes a basic gorm read call
func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) { if in == nil { return nil, errors1.NilArgumentError } ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } if ormObj.Id == 0 { return nil, errors1.EmptyIdError } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok { if db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil { return nil, err } } if db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok { if db, err = hook.BeforeReadFind(ctx, db); err != nil { return nil, err } } ormResponse := HealthMenstruationDailyEntryORM{} if err = db.Where(&ormObj).First(&ormResponse).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok { if err = hook.AfterReadFind(ctx, db); err != nil { return nil, err } } pbResponse, err := ormResponse.ToPB(ctx) return &pbResponse, err }
[ "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultReadHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := 
interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationPersonalInfoORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationPersonalInfoORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationPersonalInfoORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, 
errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := 
interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultReadLogActivity(ctx context.Context, in *LogActivity, db *gorm1.DB) (*LogActivity, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &LogActivityORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := LogActivityORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(LogActivityORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func 
DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif 
err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func (m *HealthMenstruationDailyEntry) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\tif v, ok := interface{}(m.GetDay()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"Day\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for IntensityPercentage\n\n\t// no validation rules for Type\n\n\t// no validation rules for Manual\n\n\t// no validation rules for BasedOnPrediction\n\n\treturn nil\n}", "func GetData(w http.ResponseWriter, r *http.Request) {\n\tresult := dailyData{}\n\tc := utils.MONGOSESSION.DB(\"healthDB\").C(\"healthData\")\n\tc.Find(bson.M{\"date\": utils.GetDate(time.Now())}).One(&result)\n\tb, _ := json.Marshal(result)\n\tfmt.Fprintf(w, string(b))\n}", "func GetEntryAtDate(date interface{}, entry string) interface{} {\n\tisRequestedFieldEntryValid := false\n\tresult := dailyData{}\n\n\tc := utils.MONGOSESSION.DB(\"healthDB\").C(\"healthData\")\n\tc.Find(bson.M{\"date\": date}).One(&result)\n\tif entry != \"\" {\n\t\tavailableFields, _ := 
reflections.Fields(result.Data)\n\n\t\t// Check if the requested field entry is available at the requested date\n\t\tfor _, field := range availableFields {\n\t\t\tif entry == field {\n\t\t\t\tisRequestedFieldEntryValid = true\n\t\t\t}\n\t\t}\n\n\t\tif !isRequestedFieldEntryValid {\n\t\t\treturn nil\n\t\t}\n\n\t\trequestedData, err := reflections.GetField(result.Data, entry)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s\", err)\n\t\t}\n\t\treturn requestedData\n\t}\n\treturn result\n}", "func (lsm *lsm) Read(key string, ts uint64) (*Entry, error) {\n\tfor _, level := range lsm.levels {\n\t\tentry, err := level.Find(key, ts)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase *ErrKeyNotFound:\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\treturn entry, nil\n\t\t}\n\t}\n\treturn nil, newErrKeyNotFound()\n}", "func DefaultReadTypeBecomesEmpty(ctx context.Context, in *TypeBecomesEmpty, db *gorm1.DB) (*TypeBecomesEmpty, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultReadTypeBecomesEmpty\")\n\t}\n\tormParams := ConvertTypeBecomesEmptyToORM(*in)\n\tormResponse := TypeBecomesEmptyORM{}\n\tif err := db.Set(\"gorm:auto_preload\", true).Where(&ormParams).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tpbResponse := ConvertTypeBecomesEmptyFromORM(ormResponse)\n\treturn &pbResponse, nil\n}", "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = 
gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func (dao *baseDAO) ReadOne(id int32) {}", "func (e *ExpenseModel) ReadAll(filter interface{}) ([]Expense, error) {\n\tvar expenses []Expense\n\tcollection := e.db.Client.Database(e.db.DBName).Collection(\"expenses\")\n\tlog.Printf(\"filter: %v\\n\", filter)\n\t// sort the entries based on the `date` field\n\topts := options.FindOptions{}\n\topts.SetSort(bson.D{{\"date\", -1}})\n\tcur, err := collection.Find(context.TODO(), filter, &opts)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR FINDING DATA: %v\\n\", err)\n\t\treturn expenses, err\n\t}\n\tfor cur.Next(context.TODO()) {\n\t\tvar expense Expense\n\t\terr = cur.Decode(&expense)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error on Decoding the document: %v\\n\", err)\n\t\t}\n\t\texpenses = append(expenses, expense)\n\t}\n\tlog.Printf(\"documentReturned: %v\\n\", expenses)\n\treturn expenses, nil\n}", "func (ps *Store) Read(ctx context.Context, key datastore.Key, entity 
datastore.Entity) error {\n\tc := GetCon(ctx)\n\temd := entity.GetEntityMetadata()\n\titer := c.Query(getJSONSelect(emd.GetName(), emd.GetIDColumnName()), key).Iter()\n\tvar json string\n\tvalid := iter.Scan(&json)\n\tif !valid {\n\t\treturn common.NewError(datastore.EntityNotFound, fmt.Sprintf(\"%v not found with id = %v\", emd.GetName(), key))\n\t}\n\tdatastore.FromJSON(json, entity)\n\tif err := iter.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (store TodoStore) Read(_ sqlx.Queryer, filters ...gtimer.TodoFilter) (gtimer.Todos, error) {\n\tvar todo gtimer.Todo\n\tfor _, filter := range filters {\n\t\tfilter(&todo)\n\t}\n\tif todo.ID != \"\" {\n\t\ttodo, err := store.Get(todo.ID)\n\t\tif err != nil {\n\t\t\treturn gtimer.Todos{}, err\n\t\t}\n\t\treturn gtimer.Todos{todo}, err\n\t}\n\tif todo.Status != \"\" {\n\t\treturn store.ByStatus(todo.Status)\n\t}\n\treturn store.All()\n}", "func (store TodoStore) Read(q sqlx.Queryer, filters ...gtimer.TodoFilter) (gtimer.Todos, error) {\n\tvar todo gtimer.Todo\n\tfor _, filter := range filters {\n\t\tfilter(&todo)\n\t}\n\tif todo.ID != \"\" {\n\t\ttodo, err := store.Get(q, todo.ID)\n\t\tif err != nil {\n\t\t\treturn gtimer.Todos{}, err\n\t\t}\n\t\treturn gtimer.Todos{todo}, err\n\t}\n\tif todo.Status != \"\" {\n\t\treturn store.ByStatus(q, todo.Status)\n\t}\n\treturn store.All(q)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultStrictUpdateHealthMenstruationDailyEntry clears first level 1:many children and then executes a gorm update call
func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) { if in == nil { return nil, fmt.Errorf("Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry") } ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } lockedRow := &HealthMenstruationDailyEntryORM{} db.Model(&ormObj).Set("gorm:query_option", "FOR UPDATE").Where("id=?", ormObj.Id).First(lockedRow) if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok { if db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil { return nil, err } } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok { if db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil { return nil, err } } if err = db.Save(&ormObj).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok { if err = hook.AfterStrictUpdateSave(ctx, db); err != nil { return nil, err } } pbResponse, err := ormObj.ToPB(ctx) if err != nil { return nil, err } return &pbResponse, err }
[ "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), 
len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func 
DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationDailyEntryORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationPersonalInfo\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := 
&HealthMenstruationPersonalInfoORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func (m *HealthMenstruationDailyEntry) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\tif v, ok := interface{}(m.GetDay()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"Day\",\n\t\t\t\treason: 
\"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for IntensityPercentage\n\n\t// no validation rules for Type\n\n\t// no validation rules for Manual\n\n\t// no validation rules for BasedOnPrediction\n\n\treturn nil\n}", "func (pr *PersistedRevision) UpdateAllChildren(children map[string][]Revision, branch *Branch) Revision {\n\tlog.Debugw(\"updating-all-persisted-children\", log.Fields{\"hash\": pr.GetHash()})\n\n\tnewNPR := pr.Revision.UpdateAllChildren(children, branch)\n\n\tnewPR := &PersistedRevision{\n\t\tRevision: newNPR,\n\t\tCompress: pr.Compress,\n\t\tkvStore: pr.kvStore,\n\t\tevents: pr.events,\n\t}\n\n\tif newPR.GetHash() != pr.GetHash() {\n\t\tnewPR.isWatched = false\n\t\tnewPR.isStored = false\n\t\tpr.Drop(branch.Txid, false)\n\t\tnewPR.SetupWatch(newPR.GetName())\n\t} else {\n\t\tnewPR.isWatched = true\n\t\tnewPR.isStored = true\n\t}\n\n\treturn newPR\n}", "func (hc *LegacyHealthCheckImpl) updateHealth(ts *LegacyTabletStats, conn queryservice.QueryService) {\n\t// Unconditionally send the received update at the end.\n\tdefer func() {\n\t\tif hc.listener != nil {\n\t\t\thc.listener.StatsUpdate(ts)\n\t\t}\n\t}()\n\n\thc.mu.Lock()\n\tth, ok := hc.addrToHealth[ts.Key]\n\tif !ok {\n\t\t// This can happen on delete because the entry is removed first,\n\t\t// or if LegacyHealthCheckImpl has been closed.\n\t\thc.mu.Unlock()\n\t\treturn\n\t}\n\toldts := th.latestTabletStats\n\tth.latestTabletStats = *ts\n\tth.conn = conn\n\thc.mu.Unlock()\n\n\t// In the case where a tablet changes type (but not for the\n\t// initial message), we want to log it, and maybe advertise it too.\n\tif oldts.Target.TabletType != topodatapb.TabletType_UNKNOWN && oldts.Target.TabletType != ts.Target.TabletType {\n\t\t// Log and maybe notify\n\t\tlog.Infof(\"HealthCheckUpdate(Type Change): %v, tablet: %s, target %+v => %+v, reparent time: %v\",\n\t\t\toldts.Name, topotools.TabletIdent(oldts.Tablet), 
topotools.TargetIdent(oldts.Target), topotools.TargetIdent(ts.Target), ts.TabletExternallyReparentedTimestamp)\n\t\tif hc.listener != nil && hc.sendDownEvents {\n\t\t\toldts.Up = false\n\t\t\thc.listener.StatsUpdate(&oldts)\n\t\t}\n\n\t\t// Track how often a tablet gets promoted to master. It is used for\n\t\t// comparing against the variables in go/vtgate/buffer/variables.go.\n\t\tif oldts.Target.TabletType != topodatapb.TabletType_MASTER && ts.Target.TabletType == topodatapb.TabletType_MASTER {\n\t\t\thcMasterPromotedCounters.Add([]string{ts.Target.Keyspace, ts.Target.Shard}, 1)\n\t\t}\n\t}\n}", "func (hook *AtsHook) Fire(entry *logrus.Entry) error {\n\n\trowKey := strconv.FormatInt(int64(entry.Time.UnixNano()), 10)\n\ttableEntry := hook.table.GetEntityReference(\"logrus\",rowKey )\n\tprops := make(map[string]interface{})\n\n\t// technically dont need to make since entry.Data is already a map to interface. But will keep mapping here incase it changes.\n\tfor k,v := range entry.Data {\n\t\tprops[k] = v\n\t}\n\tprops[timestampID] = entry.Time.UTC()\n\tprops[levelID] = entry.Level.String()\n\tprops[messageID] = entry.Message\t\n\ttableEntry.Properties = props\n\terr := tableEntry.Insert(storage.EmptyPayload, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\treturn nil\n}", "func (client ModelClient) UpdateHierarchicalEntityChildResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (honest *Honest) flushUpdates() {\n\n\thonest.blockUpdates = honest.blockUpdates[:0]\n}", "func (client ModelClient) UpdateHierarchicalEntityResponder(resp *http.Response) (result OperationStatus, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (du *DoctorinfoUpdate) ClearEducationlevel() *DoctorinfoUpdate {\n\tdu.mutation.ClearEducationlevel()\n\treturn du\n}", "func updateNestedSetLimitsInDB() {\n log := logger.GetLoggerInstance()\n updateChannel = make(chan *sqliteNSOP, 5000)\n for true {\n nsOp := <- updateChannel //Blocking call to read from channel.\n err := nsOp.dataObj.updateIds(nsOp.conn)\n if err != nil {\n log.Error(\"Failed to update record %s err: %s\",\n nsOp.dataObj.Uid, err)\n continue\n }\n log.Trace(\"Record %s is updated with nestedset fields\",\n nsOp.dataObj.Uid)\n }\n}", "func (o *Tree) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\ttreeUpdateCacheMut.RLock()\n\tcache, cached := treeUpdateCache[key]\n\ttreeUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\ttreeAllColumns,\n\t\t\ttreePrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update trees, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE \\\"trees\\\" SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"\\\"\", \"\\\"\", 1, wl),\n\t\t\tstrmangle.WhereClause(\"\\\"\", \"\\\"\", len(wl)+1, treePrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(treeType, treeMapping, append(wl, treePrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), 
cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update trees row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for trees\")\n\t}\n\n\tif !cached {\n\t\ttreeUpdateCacheMut.Lock()\n\t\ttreeUpdateCache[key] = cache\n\t\ttreeUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "func (u updateCachedUploadRevision) apply(data *journalPersist) {\n\tc := data.CachedRevisions[u.Revision.ParentID.String()]\n\tc.Revision = u.Revision\n\tif u.SectorIndex == len(c.MerkleRoots) {\n\t\tc.MerkleRoots = append(c.MerkleRoots, u.SectorRoot)\n\t} else if u.SectorIndex < len(c.MerkleRoots) {\n\t\tc.MerkleRoots[u.SectorIndex] = u.SectorRoot\n\t} else {\n\t\t// Shouldn't happen. TODO: Add correct error handling.\n\t}\n\tdata.CachedRevisions[u.Revision.ParentID.String()] = c\n}", "func (duo *DoctorinfoUpdateOne) ClearEducationlevel() *DoctorinfoUpdateOne {\n\tduo.mutation.ClearEducationlevel()\n\treturn duo\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultPatchHealthMenstruationDailyEntry executes a basic gorm update call with patch behavior
func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) { if in == nil { return nil, errors1.NilArgumentError } var pbObj HealthMenstruationDailyEntry var err error if hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok { if db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil { return nil, err } } pbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db) if err != nil { return nil, err } pbObj = *pbReadRes if hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok { if db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil { return nil, err } } if _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, "", db); err != nil { return nil, err } if hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok { if db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil { return nil, err } } pbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db) if err != nil { return nil, err } if hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok { if err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil { return nil, err } } return pbResponse, nil }
[ "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err 
!= nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := 
interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationDailyEntryORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationPersonalInfo\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationPersonalInfoORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := 
interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func (m *UpdateHealthMenstruationDailyEntryRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPayload()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"Payload\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m 
*ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilder) Patch(ctx context.Context, body i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, requestConfiguration *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilderPatchRequestConfiguration)(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.CreateDeviceHealthStatusFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable), nil\n}", "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func (m 
*HealthMenstruationDailyEntry) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\tif v, ok := interface{}(m.GetDay()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"Day\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for IntensityPercentage\n\n\t// no validation rules for Type\n\n\t// no validation rules for Manual\n\n\t// no validation rules for BasedOnPrediction\n\n\treturn nil\n}", "func (m *UpdateHealthMenstruationDailyEntryResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationDailyEntryResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *kuberhealthyChecks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result KuberhealthyCheck, err error) {\n\tresult = KuberhealthyCheck{}\n\terr = 
c.client.Patch(pt).\n\t\tNamespace(c.ns).\n\t\tResource(\"khchecks\").\n\t\tSubResource(subresources...).\n\t\tName(name).\n\t\tBody(data).\n\t\tDo(context.TODO()).\n\t\tInto(&result)\n\treturn\n}", "func (c *Controller) updateWorkloadEntryHealth(obj any) error {\n\tcondition := obj.(HealthCondition)\n\t// get previous status\n\tcfg := c.store.Get(gvk.WorkloadEntry, condition.entryName, condition.proxy.Metadata.Namespace)\n\tif cfg == nil {\n\t\treturn fmt.Errorf(\"failed to update health status for %v: WorkloadEntry %v not found\", condition.proxy.ID, condition.entryName)\n\t}\n\t// The workloadentry has reconnected to the other istiod\n\tif cfg.Annotations[WorkloadControllerAnnotation] != c.instanceID {\n\t\treturn nil\n\t}\n\n\t// check if the existing health status is newer than this one\n\twleStatus, ok := cfg.Status.(*v1alpha1.IstioStatus)\n\tif ok {\n\t\thealthCondition := status.GetCondition(wleStatus.Conditions, status.ConditionHealthy)\n\t\tif healthCondition != nil {\n\t\t\tif healthCondition.LastProbeTime.AsTime().After(condition.condition.LastProbeTime.AsTime()) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// replace the updated status\n\twle := status.UpdateConfigCondition(*cfg, condition.condition)\n\t// update the status\n\t_, err := c.store.UpdateStatus(wle)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while updating WorkloadEntry health status for %s: %v\", condition.proxy.ID, err)\n\t}\n\tlog.Debugf(\"updated health status of %v to %v\", condition.proxy.ID, condition.condition)\n\treturn nil\n}", "func (hook *AtsHook) Fire(entry *logrus.Entry) error {\n\n\trowKey := strconv.FormatInt(int64(entry.Time.UnixNano()), 10)\n\ttableEntry := hook.table.GetEntityReference(\"logrus\",rowKey )\n\tprops := make(map[string]interface{})\n\n\t// technically dont need to make since entry.Data is already a map to interface. 
But will keep mapping here incase it changes.\n\tfor k,v := range entry.Data {\n\t\tprops[k] = v\n\t}\n\tprops[timestampID] = entry.Time.UTC()\n\tprops[levelID] = entry.Level.String()\n\tprops[messageID] = entry.Message\t\n\ttableEntry.Properties = props\n\terr := tableEntry.Insert(storage.EmptyPayload, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\treturn nil\n}", "func (c *cronFederatedHPAs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CronFederatedHPA, err error) {\n\tresult = &v1alpha1.CronFederatedHPA{}\n\terr = c.client.Patch(pt).\n\t\tNamespace(c.ns).\n\t\tResource(\"cronfederatedhpas\").\n\t\tName(name).\n\t\tSubResource(subresources...).\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tBody(data).\n\t\tDo(ctx).\n\t\tInto(result)\n\treturn\n}", "func (a *HyperflexApiService) PatchHyperflexSoftwareDistributionEntry(ctx context.Context, moid string) ApiPatchHyperflexSoftwareDistributionEntryRequest {\n\treturn ApiPatchHyperflexSoftwareDistributionEntryRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultPatchSetHealthMenstruationDailyEntry executes a bulk gorm update call with patch behavior
func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) { if len(objects) != len(updateMasks) { return nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects)) } results := make([]*HealthMenstruationDailyEntry, 0, len(objects)) for i, patcher := range objects { pbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db) if err != nil { return nil, err } results = append(results, pbResponse) } return results, nil }
[ "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil 
{\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = 
hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument 
to DefaultStrictUpdateHealthMenstruationPersonalInfo\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationPersonalInfoORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationDailyEntryORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func 
DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func (m *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilder) Patch(ctx context.Context, body i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, requestConfiguration *ManagedTenantsDeviceHealthStatusesDeviceHealthStatusItemRequestBuilderPatchRequestConfiguration)(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": 
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.CreateDeviceHealthStatusFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.DeviceHealthStatusable), nil\n}", "func (m *UpdateHealthMenstruationDailyEntryRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPayload()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"Payload\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *UpdateHealthMenstruationDailyEntryResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetResult()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn UpdateHealthMenstruationDailyEntryResponseValidationError{\n\t\t\t\tfield: \"Result\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *HealthMenstruationDailyEntry) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil 
{\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\tif v, ok := interface{}(m.GetDay()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"Day\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for IntensityPercentage\n\n\t// no validation rules for Type\n\n\t// no validation rules for Manual\n\n\t// no validation rules for BasedOnPrediction\n\n\treturn nil\n}", "func (m *OnlineMeetingsItemAttendanceReportsMeetingAttendanceReportItemRequestBuilder) Patch(ctx context.Context, body iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.MeetingAttendanceReportable, requestConfiguration *OnlineMeetingsItemAttendanceReportsMeetingAttendanceReportItemRequestBuilderPatchRequestConfiguration)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.MeetingAttendanceReportable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateMeetingAttendanceReportFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return 
res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.MeetingAttendanceReportable), nil\n}", "func (hook *AtsHook) Fire(entry *logrus.Entry) error {\n\n\trowKey := strconv.FormatInt(int64(entry.Time.UnixNano()), 10)\n\ttableEntry := hook.table.GetEntityReference(\"logrus\",rowKey )\n\tprops := make(map[string]interface{})\n\n\t// technically dont need to make since entry.Data is already a map to interface. But will keep mapping here incase it changes.\n\tfor k,v := range entry.Data {\n\t\tprops[k] = v\n\t}\n\tprops[timestampID] = entry.Time.UTC()\n\tprops[levelID] = entry.Level.String()\n\tprops[messageID] = entry.Message\t\n\ttableEntry.Properties = props\n\terr := tableEntry.Insert(storage.EmptyPayload, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\treturn nil\n}", "func (o *Smallblog) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {\n\tvar err error\n\tif err = o.doBeforeUpdateHooks(ctx, exec); err != nil {\n\t\treturn 0, err\n\t}\n\tkey := makeCacheKey(columns, nil)\n\tsmallblogUpdateCacheMut.RLock()\n\tcache, cached := smallblogUpdateCache[key]\n\tsmallblogUpdateCacheMut.RUnlock()\n\n\tif !cached {\n\t\twl := columns.UpdateColumnSet(\n\t\t\tsmallblogAllColumns,\n\t\t\tsmallblogPrimaryKeyColumns,\n\t\t)\n\n\t\tif !columns.IsWhitelist() {\n\t\t\twl = strmangle.SetComplement(wl, []string{\"created_at\"})\n\t\t}\n\t\tif len(wl) == 0 {\n\t\t\treturn 0, errors.New(\"models: unable to update smallblog, could not build whitelist\")\n\t\t}\n\n\t\tcache.query = fmt.Sprintf(\"UPDATE `smallblog` SET %s WHERE %s\",\n\t\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, wl),\n\t\t\tstrmangle.WhereClause(\"`\", \"`\", 0, smallblogPrimaryKeyColumns),\n\t\t)\n\t\tcache.valueMapping, err = queries.BindMapping(smallblogType, smallblogMapping, append(wl, smallblogPrimaryKeyColumns...))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tvalues := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), 
cache.valueMapping)\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, cache.query)\n\t\tfmt.Fprintln(writer, values)\n\t}\n\tvar result sql.Result\n\tresult, err = exec.ExecContext(ctx, cache.query, values...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to update smallblog row\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by update for smallblog\")\n\t}\n\n\tif !cached {\n\t\tsmallblogUpdateCacheMut.Lock()\n\t\tsmallblogUpdateCache[key] = cache\n\t\tsmallblogUpdateCacheMut.Unlock()\n\t}\n\n\treturn rowsAff, o.doAfterUpdateHooks(ctx, exec)\n}", "func (a *BulkApiService) PatchBulkExport(ctx context.Context, moid string) ApiPatchBulkExportRequest {\n\treturn ApiPatchBulkExportRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tmoid: moid,\n\t}\n}", "func (m *ManagedTenantsManagedTenantAlertsManagedTenantAlertItemRequestBuilder) Patch(ctx context.Context, body i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.ManagedTenantAlertable, requestConfiguration *ManagedTenantsManagedTenantAlertsManagedTenantAlertItemRequestBuilderPatchRequestConfiguration)(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.ManagedTenantAlertable, error) {\n requestInfo, err := m.ToPatchRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.CreateManagedTenantAlertFromDiscriminatorValue, 
errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i72d786f54cc0bb289c971b085dd642b2fc3af6394328682e69783fd7e229b582.ManagedTenantAlertable), nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultApplyFieldMaskHealthMenstruationDailyEntry patches an pbObject with patcher according to a field mask.
func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) { if patcher == nil { return nil, nil } else if patchee == nil { return nil, errors1.NilArgumentError } var err error for _, f := range updateMask.Paths { if f == prefix+"Id" { patchee.Id = patcher.Id continue } if f == prefix+"CreatedAt" { patchee.CreatedAt = patcher.CreatedAt continue } if f == prefix+"UpdatedAt" { patchee.UpdatedAt = patcher.UpdatedAt continue } if f == prefix+"ProfileId" { patchee.ProfileId = patcher.ProfileId continue } if f == prefix+"Day" { patchee.Day = patcher.Day continue } if f == prefix+"IntensityPercentage" { patchee.IntensityPercentage = patcher.IntensityPercentage continue } if f == prefix+"Type" { patchee.Type = patcher.Type continue } if f == prefix+"Manual" { patchee.Manual = patcher.Manual continue } if f == prefix+"BasedOnPrediction" { patchee.BasedOnPrediction = patcher.BasedOnPrediction continue } } if err != nil { return nil, err } return patchee, nil }
[ "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := 
DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx context.Context, patchee *HealthMenstruationPersonalInfo, patcher *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PeriodLengthInDays\" {\n\t\t\tpatchee.PeriodLengthInDays = patcher.PeriodLengthInDays\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CycleLengthInDays\" {\n\t\t\tpatchee.CycleLengthInDays = patcher.CycleLengthInDays\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskLogActivity(ctx context.Context, patchee *LogActivity, patcher *LogActivity, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*LogActivity, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == 
prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Ip\" {\n\t\t\tpatchee.Ip = patcher.Ip\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskUserInfo(ctx context.Context, patchee *UserInfo, patcher *UserInfo, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*UserInfo, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedCreatedAt bool\n\tvar updatedUpdatedAt bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UserId\" {\n\t\t\tpatchee.UserId = patcher.UserId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"LastName\" {\n\t\t\tpatchee.LastName = patcher.LastName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FirstName\" {\n\t\t\tpatchee.FirstName = patcher.FirstName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Period\" {\n\t\t\tpatchee.Period = patcher.Period\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"DepartmentId\" {\n\t\t\tpatchee.DepartmentId = patcher.DepartmentId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"JobId\" {\n\t\t\tpatchee.JobId = patcher.JobId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"EnrollmentFlg\" {\n\t\t\tpatchee.EnrollmentFlg = patcher.EnrollmentFlg\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"AdminFlg\" {\n\t\t\tpatchee.AdminFlg = patcher.AdminFlg\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedCreatedAt && strings.HasPrefix(f, prefix+\"CreatedAt.\") {\n\t\t\tif patcher.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.CreatedAt == nil 
{\n\t\t\t\tpatchee.CreatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"CreatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.CreatedAt, patchee.CreatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tupdatedCreatedAt = true\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUpdatedAt && strings.HasPrefix(f, prefix+\"UpdatedAt.\") {\n\t\t\tif patcher.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"UpdatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.UpdatedAt, patchee.UpdatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tupdatedUpdatedAt = true\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskPeriod(ctx context.Context, patchee *Period, patcher *Period, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Period, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedCreatedAt bool\n\tvar updatedUpdatedAt bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" 
{\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Period\" {\n\t\t\tpatchee.Period = patcher.Period\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedCreatedAt && strings.HasPrefix(f, prefix+\"CreatedAt.\") {\n\t\t\tif patcher.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"CreatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.CreatedAt, patchee.CreatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tupdatedCreatedAt = true\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUpdatedAt && strings.HasPrefix(f, prefix+\"UpdatedAt.\") {\n\t\t\tif patcher.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"UpdatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm1.MergeWithMask(patcher.UpdatedAt, patchee.UpdatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tupdatedUpdatedAt = true\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, 
in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func DefaultApplyFieldMaskAddress(ctx context.Context, patchee *Address, patcher *Address, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Address, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Address\" {\n\t\t\tpatchee.Address = patcher.Address\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Unit\" {\n\t\t\tpatchee.Unit = patcher.Unit\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ZipCode\" {\n\t\t\tpatchee.ZipCode = patcher.ZipCode\n\t\t\tcontinue\n\t\t}\n\t\tif f == 
prefix+\"City\" {\n\t\t\tpatchee.City = patcher.City\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"State\" {\n\t\t\tpatchee.State = patcher.State\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Longitude\" {\n\t\t\tpatchee.Longitude = patcher.Longitude\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Lattitude\" {\n\t\t\tpatchee.Lattitude = patcher.Lattitude\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskComment(ctx context.Context, patchee *Comment, patcher *Comment, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*Comment, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tvar updatedCreatedAt bool\n\tvar updatedUpdatedAt bool\n\tvar updatedDeletedAt bool\n\tvar updatedBoardId bool\n\tvar updatedPostId bool\n\tvar updatedContentId bool\n\tvar updatedUserid bool\n\tvar updatedUsername bool\n\tvar updatedNickname bool\n\tvar updatedEmail bool\n\tvar updatedPassword bool\n\tvar updatedUrl bool\n\tvar updatedUseHtml bool\n\tvar updatedUseSecret bool\n\tfor i, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedCreatedAt && strings.HasPrefix(f, prefix+\"CreatedAt.\") {\n\t\t\tif patcher.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.CreatedAt == nil {\n\t\t\t\tpatchee.CreatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"CreatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.CreatedAt, patchee.CreatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == 
prefix+\"CreatedAt\" {\n\t\t\tupdatedCreatedAt = true\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUpdatedAt && strings.HasPrefix(f, prefix+\"UpdatedAt.\") {\n\t\t\tif patcher.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.UpdatedAt == nil {\n\t\t\t\tpatchee.UpdatedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"UpdatedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.UpdatedAt, patchee.UpdatedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tupdatedUpdatedAt = true\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedDeletedAt && strings.HasPrefix(f, prefix+\"DeletedAt.\") {\n\t\t\tif patcher.DeletedAt == nil {\n\t\t\t\tpatchee.DeletedAt = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.DeletedAt == nil {\n\t\t\t\tpatchee.DeletedAt = &timestamp.Timestamp{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"DeletedAt.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.DeletedAt, patchee.DeletedAt, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"DeletedAt\" {\n\t\t\tupdatedDeletedAt = true\n\t\t\tpatchee.DeletedAt = patcher.DeletedAt\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedBoardId && strings.HasPrefix(f, prefix+\"BoardId.\") {\n\t\t\tif patcher.BoardId == nil {\n\t\t\t\tpatchee.BoardId = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif 
patchee.BoardId == nil {\n\t\t\t\tpatchee.BoardId = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"BoardId.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.BoardId, patchee.BoardId, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"BoardId\" {\n\t\t\tupdatedBoardId = true\n\t\t\tpatchee.BoardId = patcher.BoardId\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedPostId && strings.HasPrefix(f, prefix+\"PostId.\") {\n\t\t\tif patcher.PostId == nil {\n\t\t\t\tpatchee.PostId = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.PostId == nil {\n\t\t\t\tpatchee.PostId = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"PostId.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.PostId, patchee.PostId, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"PostId\" {\n\t\t\tupdatedPostId = true\n\t\t\tpatchee.PostId = patcher.PostId\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedContentId && strings.HasPrefix(f, prefix+\"ContentId.\") {\n\t\t\tif patcher.ContentId == nil {\n\t\t\t\tpatchee.ContentId = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.ContentId == nil {\n\t\t\t\tpatchee.ContentId = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"ContentId.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = 
append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.ContentId, patchee.ContentId, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"ContentId\" {\n\t\t\tupdatedContentId = true\n\t\t\tpatchee.ContentId = patcher.ContentId\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUserid && strings.HasPrefix(f, prefix+\"Userid.\") {\n\t\t\tif patcher.Userid == nil {\n\t\t\t\tpatchee.Userid = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Userid == nil {\n\t\t\t\tpatchee.Userid = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"Userid.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.Userid, patchee.Userid, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"Userid\" {\n\t\t\tupdatedUserid = true\n\t\t\tpatchee.Userid = patcher.Userid\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUsername && strings.HasPrefix(f, prefix+\"Username.\") {\n\t\t\tif patcher.Username == nil {\n\t\t\t\tpatchee.Username = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Username == nil {\n\t\t\t\tpatchee.Username = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"Username.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.Username, patchee.Username, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"Username\" {\n\t\t\tupdatedUsername = true\n\t\t\tpatchee.Username = patcher.Username\n\t\t\tcontinue\n\t\t}\n\t\tif 
!updatedNickname && strings.HasPrefix(f, prefix+\"Nickname.\") {\n\t\t\tif patcher.Nickname == nil {\n\t\t\t\tpatchee.Nickname = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Nickname == nil {\n\t\t\t\tpatchee.Nickname = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"Nickname.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.Nickname, patchee.Nickname, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"Nickname\" {\n\t\t\tupdatedNickname = true\n\t\t\tpatchee.Nickname = patcher.Nickname\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedEmail && strings.HasPrefix(f, prefix+\"Email.\") {\n\t\t\tif patcher.Email == nil {\n\t\t\t\tpatchee.Email = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Email == nil {\n\t\t\t\tpatchee.Email = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"Email.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.Email, patchee.Email, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"Email\" {\n\t\t\tupdatedEmail = true\n\t\t\tpatchee.Email = patcher.Email\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedPassword && strings.HasPrefix(f, prefix+\"Password.\") {\n\t\t\tif patcher.Password == nil {\n\t\t\t\tpatchee.Password = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Password == nil {\n\t\t\t\tpatchee.Password = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif 
trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"Password.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.Password, patchee.Password, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"Password\" {\n\t\t\tupdatedPassword = true\n\t\t\tpatchee.Password = patcher.Password\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUrl && strings.HasPrefix(f, prefix+\"Url.\") {\n\t\t\tif patcher.Url == nil {\n\t\t\t\tpatchee.Url = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.Url == nil {\n\t\t\t\tpatchee.Url = &wrappers.StringValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"Url.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.Url, patchee.Url, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"Url\" {\n\t\t\tupdatedUrl = true\n\t\t\tpatchee.Url = patcher.Url\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUseHtml && strings.HasPrefix(f, prefix+\"UseHtml.\") {\n\t\t\tif patcher.UseHtml == nil {\n\t\t\t\tpatchee.UseHtml = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.UseHtml == nil {\n\t\t\t\tpatchee.UseHtml = &wrappers.BoolValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"UseHtml.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.UseHtml, patchee.UseHtml, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"UseHtml\" {\n\t\t\tupdatedUseHtml = 
true\n\t\t\tpatchee.UseHtml = patcher.UseHtml\n\t\t\tcontinue\n\t\t}\n\t\tif !updatedUseSecret && strings.HasPrefix(f, prefix+\"UseSecret.\") {\n\t\t\tif patcher.UseSecret == nil {\n\t\t\t\tpatchee.UseSecret = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif patchee.UseSecret == nil {\n\t\t\t\tpatchee.UseSecret = &wrappers.BoolValue{}\n\t\t\t}\n\t\t\tchildMask := &field_mask1.FieldMask{}\n\t\t\tfor j := i; j < len(updateMask.Paths); j++ {\n\t\t\t\tif trimPath := strings.TrimPrefix(updateMask.Paths[j], prefix+\"UseSecret.\"); trimPath != updateMask.Paths[j] {\n\t\t\t\t\tchildMask.Paths = append(childMask.Paths, trimPath)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := gorm2.MergeWithMask(patcher.UseSecret, patchee.UseSecret, childMask); err != nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\t\tif f == prefix+\"UseSecret\" {\n\t\t\tupdatedUseSecret = true\n\t\t\tpatchee.UseSecret = patcher.UseSecret\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpVoteCount\" {\n\t\t\tpatchee.UpVoteCount = patcher.UpVoteCount\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"DownVoteCount\" {\n\t\t\tpatchee.DownVoteCount = patcher.DownVoteCount\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskTags(ctx context.Context, patchee *Tags, patcher *Tags, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Tags, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"TagName\" {\n\t\t\tpatchee.TagName = patcher.TagName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"TagDescription\" {\n\t\t\tpatchee.TagDescription = patcher.TagDescription\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Metadata\" {\n\t\t\tpatchee.Metadata = patcher.Metadata\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskIntPoint(ctx context.Context, patchee *IntPoint, patcher *IntPoint, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*IntPoint, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.New(\"Patchee inputs to DefaultApplyFieldMaskIntPoint must be non-nil\")\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"X\" {\n\t\t\tpatchee.X = patcher.X\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Y\" {\n\t\t\tpatchee.Y = patcher.Y\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchSetHealthMenstruationPersonalInfo(ctx context.Context, objects []*HealthMenstruationPersonalInfo, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationPersonalInfo, error) {\n\tif len(objects) != len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationPersonalInfo, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationPersonalInfo(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) {\n\tin := HealthMenstruationDailyEntry{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationDailyEntryORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationDailyEntry{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultApplyFieldMaskProfile(ctx context.Context, patchee *Profile, patcher *Profile, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*Profile, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Notes\" {\n\t\t\tpatchee.Notes = patcher.Notes\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FirstName\" {\n\t\t\tpatchee.FirstName = patcher.FirstName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"LastName\" {\n\t\t\tpatchee.LastName = 
patcher.LastName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"PrimaryEmail\" {\n\t\t\tpatchee.PrimaryEmail = patcher.PrimaryEmail\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Groups\" {\n\t\t\tpatchee.Groups = patcher.Groups\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfilePictureUrl\" {\n\t\t\tpatchee.ProfilePictureUrl = patcher.ProfilePictureUrl\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskItemSold(ctx context.Context, patchee *ItemSold, patcher *ItemSold, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*ItemSold, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultApplyFieldMaskMultiaccountTypeWithoutID(ctx context.Context, patchee *MultiaccountTypeWithoutID, patcher *MultiaccountTypeWithoutID, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*MultiaccountTypeWithoutID, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"SomeField\" {\n\t\t\tpatchee.SomeField = patcher.SomeField\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultPatchHealthMenstruationPersonalInfo(ctx context.Context, in *HealthMenstruationPersonalInfo, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationPersonalInfo, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationPersonalInfo\n\tvar err error\n\tif hook, 
ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationPersonalInfo(ctx, &HealthMenstruationPersonalInfo{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationPersonalInfo(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationPersonalInfoWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationPersonalInfo(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationPersonalInfoWithAfterPatchSave); ok {\n\t\tif err = hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); 
ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultApplyFieldMaskOwner(ctx context.Context, patchee *Owner, patcher *Owner, updateMask *field_mask.FieldMask, prefix string, db *gorm.DB) (*Owner, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"FirstName\" {\n\t\t\tpatchee.FirstName = patcher.FirstName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"LastName\" {\n\t\t\tpatchee.LastName = patcher.LastName\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Email\" {\n\t\t\tpatchee.Email = patcher.Email\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Country\" {\n\t\t\tpatchee.Country = patcher.Country\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultListHealthMenstruationDailyEntry executes a gorm list call
func DefaultListHealthMenstruationDailyEntry(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationDailyEntry, error) { in := HealthMenstruationDailyEntry{} ormObj, err := in.ToORM(ctx) if err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListApplyQuery); ok { if db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil { return nil, err } } db, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationDailyEntryORM{}, &HealthMenstruationDailyEntry{}, f, s, p, fs) if err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeListFind); ok { if db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil { return nil, err } } db = db.Where(&ormObj) db = db.Order("id") ormResponse := []HealthMenstruationDailyEntryORM{} if err := db.Find(&ormResponse).Error; err != nil { return nil, err } if hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterListFind); ok { if err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil { return nil, err } } pbResponse := []*HealthMenstruationDailyEntry{} for _, responseEntry := range ormResponse { temp, err := responseEntry.ToPB(ctx) if err != nil { return nil, err } pbResponse = append(pbResponse, &temp) } return pbResponse, nil }
[ "func DefaultReadHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ormObj.Id == 0 {\n\t\treturn nil, errors1.EmptyIdError\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadApplyQuery); ok {\n\t\tif db, err = hook.BeforeReadApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif db, err = gorm2.ApplyFieldSelection(ctx, db, nil, &HealthMenstruationDailyEntryORM{}); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeReadFind); ok {\n\t\tif db, err = hook.BeforeReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tormResponse := HealthMenstruationDailyEntryORM{}\n\tif err = db.Where(&ormObj).First(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormResponse).(HealthMenstruationDailyEntryORMWithAfterReadFind); ok {\n\t\tif err = hook.AfterReadFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormResponse.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultListHealthMenstruationPersonalInfo(ctx context.Context, db *gorm1.DB, f *query1.Filtering, s *query1.Sorting, p *query1.Pagination, fs *query1.FieldSelection) ([]*HealthMenstruationPersonalInfo, error) {\n\tin := HealthMenstruationPersonalInfo{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &HealthMenstruationPersonalInfoORM{}, &HealthMenstruationPersonalInfo{}, f, s, 
p, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = db.Order(\"id\")\n\tormResponse := []HealthMenstruationPersonalInfoORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationPersonalInfoORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse, f, s, p, fs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*HealthMenstruationPersonalInfo{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func DefaultCreateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeCreate_); ok {\n\t\tif db, err = hook.BeforeCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Create(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterCreate_); ok {\n\t\tif err = hook.AfterCreate_(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\treturn &pbResponse, err\n}", "func DefaultPatchSetHealthMenstruationDailyEntry(ctx context.Context, objects []*HealthMenstruationDailyEntry, updateMasks []*field_mask1.FieldMask, db *gorm1.DB) ([]*HealthMenstruationDailyEntry, error) {\n\tif len(objects) != 
len(updateMasks) {\n\t\treturn nil, fmt.Errorf(errors1.BadRepeatedFieldMaskTpl, len(updateMasks), len(objects))\n\t}\n\n\tresults := make([]*HealthMenstruationDailyEntry, 0, len(objects))\n\tfor i, patcher := range objects {\n\t\tpbResponse, err := DefaultPatchHealthMenstruationDailyEntry(ctx, patcher, updateMasks[i], db)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults = append(results, pbResponse)\n\t}\n\n\treturn results, nil\n}", "func DefaultPatchHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar pbObj HealthMenstruationDailyEntry\n\tvar err error\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchRead); ok {\n\t\tif db, err = hook.BeforePatchRead(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbReadRes, err := DefaultReadHealthMenstruationDailyEntry(ctx, &HealthMenstruationDailyEntry{Id: in.GetId()}, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpbObj = *pbReadRes\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchApplyFieldMask); ok {\n\t\tif db, err = hook.BeforePatchApplyFieldMask(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif _, err := DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx, &pbObj, in, updateMask, \"\", db); err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&pbObj).(HealthMenstruationDailyEntryWithBeforePatchSave); ok {\n\t\tif db, err = hook.BeforePatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := DefaultStrictUpdateHealthMenstruationDailyEntry(ctx, &pbObj, db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(pbResponse).(HealthMenstruationDailyEntryWithAfterPatchSave); ok {\n\t\tif err = 
hook.AfterPatchSave(ctx, in, updateMask, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn pbResponse, nil\n}", "func (m *ListHealthMenstruationDailyEntryRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"Filter\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetOrderBy()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"OrderBy\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetFields()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"Fields\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetPaging()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn ListHealthMenstruationDailyEntryRequestValidationError{\n\t\t\t\tfield: \"Paging\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func listEntriesHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Print(\"Querying datastore for entries.\\n\")\n\tvar entities []GuestEntry\n\tq := datastore.NewQuery(\"GuestEntry\").Order(\"-PostedTime\").Limit(20)\n\t_, err := client.GetAll(ctx, q, &entities)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to retrieve entries: %v\\n\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tlog.Print(\"Returning entries to 
caller.\\n\")\n\tjson, err := json.Marshal(entities)\n\tfmt.Fprintf(w, \"%s\", json)\n}", "func (m *ListHealthMenstruationDailyEntryResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tfor idx, item := range m.GetResults() {\n\t\t_, _ = idx, item\n\n\t\tif v, ok := interface{}(item).(interface{ Validate() error }); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn ListHealthMenstruationDailyEntryResponseValidationError{\n\t\t\t\t\tfield: fmt.Sprintf(\"Results[%v]\", idx),\n\t\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\t\tcause: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func DefaultApplyFieldMaskHealthMenstruationDailyEntry(ctx context.Context, patchee *HealthMenstruationDailyEntry, patcher *HealthMenstruationDailyEntry, updateMask *field_mask1.FieldMask, prefix string, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif patcher == nil {\n\t\treturn nil, nil\n\t} else if patchee == nil {\n\t\treturn nil, errors1.NilArgumentError\n\t}\n\tvar err error\n\tfor _, f := range updateMask.Paths {\n\t\tif f == prefix+\"Id\" {\n\t\t\tpatchee.Id = patcher.Id\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"CreatedAt\" {\n\t\t\tpatchee.CreatedAt = patcher.CreatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"UpdatedAt\" {\n\t\t\tpatchee.UpdatedAt = patcher.UpdatedAt\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"ProfileId\" {\n\t\t\tpatchee.ProfileId = patcher.ProfileId\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Day\" {\n\t\t\tpatchee.Day = patcher.Day\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"IntensityPercentage\" {\n\t\t\tpatchee.IntensityPercentage = patcher.IntensityPercentage\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Type\" {\n\t\t\tpatchee.Type = patcher.Type\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"Manual\" {\n\t\t\tpatchee.Manual = patcher.Manual\n\t\t\tcontinue\n\t\t}\n\t\tif f == prefix+\"BasedOnPrediction\" {\n\t\t\tpatchee.BasedOnPrediction = 
patcher.BasedOnPrediction\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn patchee, nil\n}", "func DefaultStrictUpdateHealthMenstruationDailyEntry(ctx context.Context, in *HealthMenstruationDailyEntry, db *gorm1.DB) (*HealthMenstruationDailyEntry, error) {\n\tif in == nil {\n\t\treturn nil, fmt.Errorf(\"Nil argument to DefaultStrictUpdateHealthMenstruationDailyEntry\")\n\t}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockedRow := &HealthMenstruationDailyEntryORM{}\n\tdb.Model(&ormObj).Set(\"gorm:query_option\", \"FOR UPDATE\").Where(\"id=?\", ormObj.Id).First(lockedRow)\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateCleanup); ok {\n\t\tif db, err = hook.BeforeStrictUpdateCleanup(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithBeforeStrictUpdateSave); ok {\n\t\tif db, err = hook.BeforeStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif err = db.Save(&ormObj).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(HealthMenstruationDailyEntryORMWithAfterStrictUpdateSave); ok {\n\t\tif err = hook.AfterStrictUpdateSave(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse, err := ormObj.ToPB(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pbResponse, err\n}", "func (h *handler) DailyEventList(ctx context.Context, req *events.DataRequest) (*events.EventsResponse, error) {\n\treturn h.durationEventList(ctx, req, h.calendar.DailyEventList)\n}", "func (m *HealthMenstruationDailyEntry) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for Id\n\n\tif v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: 
\"CreatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetUpdatedAt()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"UpdatedAt\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for ProfileId\n\n\tif v, ok := interface{}(m.GetDay()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn HealthMenstruationDailyEntryValidationError{\n\t\t\t\tfield: \"Day\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\t// no validation rules for IntensityPercentage\n\n\t// no validation rules for Type\n\n\t// no validation rules for Manual\n\n\t// no validation rules for BasedOnPrediction\n\n\treturn nil\n}", "func (hh *HealthCheckHandler) List(w http.ResponseWriter, r *http.Request) {\n\tqueryParams := r.URL.Query()\n\tpage, err := strconv.Atoi(queryParams[\"page\"][0])\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlist := hh.db.List()\n\tsort.Sort(models.HealthChecks(list))\n\tstart, end := paginate(page, 10, len(list))\n\tpaginated := list[start:end]\n\n\tres := &models.HealthCheckList{\n\t\tItems: paginated,\n\t\tTotal: len(list),\n\t\tPage: page,\n\t\tSize: 10,\n\t}\n\n\tb, err := json.Marshal(res)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(b)\n}", "func (a *API) ListEventForDay(ctx context.Context, r *empty.Empty) (*proto.ListEventResponse, error) {\n\tstartTime := time.Now().Truncate(24 * time.Hour)\n\tendTime := startTime.Add(24 * time.Hour)\n\n\treturn a.listEventForFilter(ctx, &entities.EventListFilter{\n\t\tStartTimeGt: &startTime,\n\t\tStartTimeLt: 
&endTime,\n\t})\n}", "func (gs *GreetingService) List(c endpoints.Context, r *GreetingsListReq) (*GreetingsList, error) {\n\tif r.Limit <= 0 {\n\t\tr.Limit = 10\n\t}\n\n\tq := datastore.NewQuery(\"Greeting\").Order(\"-Date\").Limit(r.Limit)\n\tgreets := make([]*Greeting, 0, r.Limit)\n\tkeys, err := q.GetAll(c, &greets)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i, k := range keys {\n\t\tgreets[i].Key = k\n\t}\n\treturn &GreetingsList{greets}, nil\n}", "func (v MeetingsResource) List(c buffalo.Context) error {\n\t// Get the DB connection from the context\n\ttx, ok := c.Value(\"tx\").(*pop.Connection)\n\tif !ok {\n\t\treturn errors.WithStack(errors.New(\"no transaction found\"))\n\t}\n\n\tmeetings := &models.Meetings{}\n\n\t// Paginate results. Params \"page\" and \"per_page\" control pagination.\n\t// Default values are \"page=1\" and \"per_page=20\".\n\tq := tx.PaginateFromParams(c.Params())\n\n\t// Retrieve all Meetings from the DB\n\tif err := q.All(meetings); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t// Add the paginator to the headers so clients know how to paginate.\n\tc.Response().Header().Set(\"X-Pagination\", q.Paginator.String())\n\n\treturn c.Render(200, r.JSON(meetings))\n}", "func DefaultListLogActivity(ctx context.Context, db *gorm1.DB) ([]*LogActivity, error) {\n\tin := LogActivity{}\n\tormObj, err := in.ToORM(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithBeforeListApplyQuery); ok {\n\t\tif db, err = hook.BeforeListApplyQuery(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb, err = gorm2.ApplyCollectionOperators(ctx, db, &LogActivityORM{}, &LogActivity{}, nil, nil, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithBeforeListFind); ok {\n\t\tif db, err = hook.BeforeListFind(ctx, db); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdb = db.Where(&ormObj)\n\tdb = 
db.Order(\"id\")\n\tormResponse := []LogActivityORM{}\n\tif err := db.Find(&ormResponse).Error; err != nil {\n\t\treturn nil, err\n\t}\n\tif hook, ok := interface{}(&ormObj).(LogActivityORMWithAfterListFind); ok {\n\t\tif err = hook.AfterListFind(ctx, db, &ormResponse); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tpbResponse := []*LogActivity{}\n\tfor _, responseEntry := range ormResponse {\n\t\ttemp, err := responseEntry.ToPB(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbResponse = append(pbResponse, &temp)\n\t}\n\treturn pbResponse, nil\n}", "func (d *OneshotDB) List() []OneshotEntry {\n\tlist := []OneshotEntry{}\n\n\tfor _, entry := range d.mapping {\n\t\tlist = append(list, entry)\n\t}\n\n\tsort.Slice(list, func(i, j int) bool {\n\t\ta := list[i].Creation\n\t\tb := list[j].Creation\n\t\treturn a.After(b)\n\t})\n\n\treturn list\n}", "func (b Budget) List(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\n\tctx, span := trace.StartSpan(ctx, \"handlers.Budget.List\")\n\tdefer span.End()\n\n\tlist, err := budget.List(ctx, b.DB)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn web.Respond(ctx, w, list, http.StatusOK)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rank returns how many nodes less than max value.
func (t *Tree) Rank(max int) int { return rank(t.Tree, max) }
[ "func (f SVDFactors) Rank(epsilon float64) int {\n\tif len(f.Sigma) == 0 {\n\t\treturn 0\n\t}\n\ttol := float64(max(f.m, len(f.Sigma))) * f.Sigma[0] * epsilon\n\tvar r int\n\tfor _, v := range f.Sigma {\n\t\tif v > tol {\n\t\t\tr++\n\t\t}\n\t}\n\treturn r\n}", "func (rb *Bitmap) Rank(x uint32) uint64 {\n\tsize := uint64(0)\n\tfor i := 0; i < rb.highlowcontainer.size(); i++ {\n\t\tkey := rb.highlowcontainer.getKeyAtIndex(i)\n\t\tif key > highbits(x) {\n\t\t\treturn size\n\t\t}\n\t\tif key < highbits(x) {\n\t\t\tsize += uint64(rb.highlowcontainer.getContainerAtIndex(i).getCardinality())\n\t\t} else {\n\t\t\treturn size + uint64(rb.highlowcontainer.getContainerAtIndex(i).rank(lowbits(x)))\n\t\t}\n\t}\n\treturn size\n}", "func (t *Tree) Rank(val float64) int {\n\treturn t.root.Rank(val)\n}", "func (svd *SVD) Rank(rcond float64) int {\n\tif rcond < 0 {\n\t\tpanic(badRcond)\n\t}\n\tif !svd.succFact() {\n\t\tpanic(badFact)\n\t}\n\ts0 := svd.s[0]\n\tfor i, v := range svd.s {\n\t\tif v <= rcond*s0 {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn len(svd.s)\n}", "func (e *Election) Rank() []int {\n\tres := make([]int, e.N)\n\tfor i := 0; i < e.N; i++ {\n\t\tfor j := i + 1; j < e.N; j++ {\n\t\t\tr := e.cmp(i, j)\n\t\t\tif r < 0 {\n\t\t\t\tres[i]++\n\t\t\t} else if r > 0 {\n\t\t\t\tres[j]++\n\t\t\t}\n\t\t}\n\t}\n\treturn res\n}", "func MaxRank(b Player, w Player) Rank {\n\tif b.Rank > w.Rank {\n\t\treturn b.Rank\n\t}\n\treturn w.Rank\n}", "func rank(square int) int {\n\treturn 8 - square/8\n}", "func (lt sdkLinkType) rank() int {\n\treturn int(lt)\n}", "func (alg *TopologicalSorter[V]) Rank(vtx V) (int, bool) {\n\tr, ok := alg.ranks[vtx]\n\treturn r, ok\n}", "func (g *graph) find_max_value(chk *checklist) int {\n\tcurrent := 0\n\tidx := -1\n\tfor i,c := range chk.nodes_count {\n\t\tif c > current {\n\t\t\tidx = i\n\t\t\tcurrent = c\n\t\t}\n\t}\n\tif idx >= 0 { chk.nodes_count[idx] = -1 }\n\treturn idx\n}", "func RankLTE(v int) predicate.Transactionfactoritemtmp {\n\treturn 
predicate.Transactionfactoritemtmp(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldRank), v))\n\t})\n}", "func MapRankToScore(rank, size int) float64 { return float64(size - rank) }", "func rank(m *Matrix) int {\n col, _ := col(m);\n if (col == nil) {\n return 0;\n }\n count := 0;\n for i := 0; i < len(col.cols); i++ {\n count += 1;\n }\n return count;\n}", "func RankLTE(v float64) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldRank), v))\n\t})\n}", "func (o *Host) GetRank() int32 {\n\tif o == nil || o.Rank == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Rank\n}", "func (z *Skiplist) RankOfLastInRange(spec RangeSpec) int {\n\tif !z.isInRange(spec) {\n\t\treturn -1\n\t}\n\n\tlastNodeRank := -1\n\tx := z.head\n\tfor i := z.level - 1; i >= 0; i-- {\n\t\tfor x.level[i].forward != nil && spec.lteMax(x.level[i].forward.ordered) {\n\t\t\tlastNodeRank += x.level[i].span\n\t\t\tx = x.level[i].forward\n\t\t}\n\t}\n\n\tif !spec.gteMin(x.ordered) {\n\t\treturn -1\n\t}\n\n\treturn lastNodeRank\n}", "func (wf WindowFrame) Rank() int {\n\treturn wf.RowIdx + 1\n}", "func (g *Generation) rank() {\n\tlessFn := func(i, j int) bool {\n\t\treturn g.Players[i].Score > g.Players[j].Score\n\t}\n\tsort.SliceStable(g.Players, lessFn)\n}", "func (r *SlidingWindow) Max() int {return r.base + len(r.values) - 1}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RegisterChannelzServiceToServer registers the channelz service to the given server. Note: it is preferred to use the admin API ( instead to register Channelz and other administrative services.
func RegisterChannelzServiceToServer(s grpc.ServiceRegistrar) { channelzgrpc.RegisterChannelzServer(s, newCZServer()) }
[ "func RegisterServer(srv *grpc.Server, c Checker) {\n\tv2 := &authV2{Checker: c}\n\tv3 := &authV3{Checker: c}\n\n\tenvoy_service_auth_v2.RegisterAuthorizationServer(srv, v2)\n\tenvoy_service_auth_v3.RegisterAuthorizationServer(srv, v3)\n}", "func (m *Manager) RegisterServer(\n\tctx context.Context,\n\treq *topomgr.RegisterServerRequest) (*walleapi.Empty, error) {\n\tputCtx, err := m.registerServer(req)\n\tif err := resolvePutCtx(ctx, putCtx, err); err != nil {\n\t\treturn nil, err\n\t}\n\tzlog.Infof(\n\t\t\"[tm] updated server: %s : %s -> %s\", req.ClusterUri, req.ServerId, req.ServerInfo)\n\treturn &walleapi.Empty{}, nil\n}", "func (rpcsrv *RPCService) RegisterServer(s *rpc.Server, r *mux.Router) {\n\trpcsrv.RPCServer = s\n\trpcsrv.HTTPRouter = r\n}", "func RegisterServerBMServer(e *bm.Engine, server ServerBMServer) {\n\tServerSvc = server\n\te.GET(\"/server.service.v1.Server/Ping\", serverPing)\n\te.GET(\"/server.service.v1.Server/SayHello\", serverSayHello)\n\te.GET(\"/kratos-demo/say_hello\", serverSayHelloURL)\n}", "func (rt *Runtime) RegisterGRPCServer(server grpcServer, funcNamePrefix string) {\n\tserver.setRuntime(rt)\n\trt.Worker.grpcServers[funcNamePrefix] = server\n}", "func RegisterServer(name string, c ServerCreator) {\n\tserverMap[name] = c\n}", "func (s *Service) RegisterGRPCService(g *grpc.Server) {\n}", "func RegisterServer(conn *network.TcpConn, body []byte) (interface{}, error) {\n log.Println(\"RegisterServer:\", string(body))\n \n if conn.Status != network.ConnInit {\n result := \"has inited!\"\n return nil, errors.New(result)\n }\n\n\treq := protocol.IRegisterServer{}\n\terr := protocol.Decode(&req, body)\n\tres := &protocol.ORegisterServer{}\n\tif err != nil {\n\t\tresult := \"error!\"\n\t\treturn nil, errors.New(result)\n\t}\n \n\tserver := ServerInfo{\n ServerInfo:req.ServerInfo,\n\t\tConn: conn,\n\t}\n\n\tconn.AttachID = req.ServerID\n conn.Status = network.ConnRegister\n\tServerMgrInstance.Register(server)\n\treturn res, nil\n}", "func 
RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {\n\tstr := &HealthService{\n\t\tCheck: srv.Check,\n\t\tWatch: srv.Watch,\n\t}\n\tRegisterHealthService(s, str)\n}", "func (gs *GatewayServer) RegisterServices(s *grpc.Server) {\n\tttnpb.RegisterGsServer(s, gs)\n\tttnpb.RegisterNsGsServer(s, gs)\n\tttnpb.RegisterGtwGsServer(s, iogrpc.New(gs))\n}", "func RegisterDispatchServer(r grpc.ServiceRegistrar, s v1.DispatchServiceServer) *grpc.ServiceDesc {\n\tr.RegisterService(grpcutil.WrapMethods(v1.DispatchService_ServiceDesc, grpcutil.DefaultUnaryMiddleware...), s)\n\treturn &v1.DispatchService_ServiceDesc\n}", "func RegisterModelCenterServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ModelCenterServiceServer) error {\n\n\tmux.Handle(\"POST\", pattern_ModelCenterService_CreateRegisteredModel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_CreateRegisteredModel_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_CreateRegisteredModel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"PATCH\", pattern_ModelCenterService_UpdateRegisteredModel_0, 
func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_UpdateRegisteredModel_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_UpdateRegisteredModel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"PATCH\", pattern_ModelCenterService_DeleteRegisteredModel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_DeleteRegisteredModel_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != 
nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_DeleteRegisteredModel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_ModelCenterService_ListRegisteredModels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_ListRegisteredModels_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_ListRegisteredModels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_ModelCenterService_GetRegisteredModelDetail_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_GetRegisteredModelDetail_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_GetRegisteredModelDetail_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_ModelCenterService_CreateModelVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_CreateModelVersion_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_CreateModelVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"PATCH\", pattern_ModelCenterService_UpdateModelVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream 
runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_UpdateModelVersion_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_UpdateModelVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"PATCH\", pattern_ModelCenterService_DeleteModelVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_DeleteModelVersion_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_DeleteModelVersion_0(ctx, mux, outboundMarshaler, w, 
req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"GET\", pattern_ModelCenterService_GetModelVersionDetail_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tvar stream runtime.ServerTransportStream\n\t\tctx = grpc.NewContextWithServerTransportStream(ctx, &stream)\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := local_request_ModelCenterService_GetModelVersionDetail_0(rctx, inboundMarshaler, server, req, pathParams)\n\t\tmd.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_ModelCenterService_GetModelVersionDetail_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}", "func RegisterPayLiveBMServer(e *bm.Engine, server PayLiveBMServer) {\n\tv1PayLiveSvc = server\n\te.POST(\"/live.liveadmin.v1.PayLive/add\", payLiveAdd)\n\te.POST(\"/live.liveadmin.v1.PayLive/update\", payLiveUpdate)\n\te.GET(\"/live.liveadmin.v1.PayLive/getList\", payLiveGetList)\n\te.POST(\"/live.liveadmin.v1.PayLive/close\", payLiveClose)\n\te.POST(\"/live.liveadmin.v1.PayLive/open\", payLiveOpen)\n}", "func (h *grpcChannelzHandler) writeServer(w io.Writer, server int64) {\n\tif err := serverTemplate.Execute(w, h.getServer(server)); err != nil {\n\t\tlog.Errorf(\"channelz: executing template: %v\", err)\n\t}\n}", "func RegisterService(server *grpc.Server, service Backend) {\n\tserver.RegisterService(&serviceDesc, service)\n}", "func RegisterService(s 
*grpc.Server, service *Service) {\n\tentry.RegisterEntryServer(s, service)\n}", "func (s *PostSvc) RegisterWithGRPCServer(g *grpc.Server) error {\n\tpb.RegisterPostServer(g, s)\n\treturn nil\n}", "func RegisterServer(s *grpc.Server) *health.Server {\n\tsrv := health.NewServer()\n\thealthpb.RegisterHealthServer(s, srv)\n\treturn srv\n}", "func registerServer(port uint16, conn *connect) (*Server, error) {\n\ts := &Server{\n\t\trpcServer: grpc.NewServer(),\n\t\tconn: conn,\n\t\tp: newOpenConfigParser(nil, ocdcShowSyntax),\n\t}\n\treq := &pb.RegisterModuleRequest{\n\t\tModule: serverName,\n\t\tPort: fmt.Sprintf(\"%d\", port),\n\t}\n\n\t// register a module to an RPC server\n\tclient := pb.NewRegisterClient(s.conn.cliconn)\n\tif _, err := client.DoRegisterModule(context.Background(), req); err != nil {\n\t\treturn nil, fmt.Errorf(\"client DoRegisterModule failed: %v\", err)\n\t}\n\n\t// register commands to an RPC server\n\tif err := s.rpcRegisterCommand(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// register a server and an RPC server to OpenConfigd\n\tpb.RegisterShowServer(s.rpcServer, s)\n\n\tgo s.rpcServer.Serve(lis)\n\n\treturn s, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parseFunctions ... Reads a parsers.of lines and parses Function structs from it.
func (i Interface) parseFunctions(contentLines []string) []Function { var functions []Function for _, line := range contentLines { if isPureVirtualDefinition(line) { newFunction := NewFunction(line) functions = append(functions, *newFunction) } } return functions }
[ "func ParseGo(code string) (functions map[uint64]*function.Function) {\n\n\tcodeLines := strings.Split(code, \"\\n\")\n\n\tfunctions = make(map[uint64]*function.Function)\n\n\tvar (\n\t\tstartLine uint64\n\t\tendLine uint64\n\t\tcomment string\n\t\tfunctionContent string\n\t\tstate = commentSearch\n\t)\n\n\tfor idx, line := range codeLines {\n\t\tlineIdx := uint64(idx + 1)\n\t\t// Searching for comment or \"func\"/\"type\" keywords\n\t\tstrings.ReplaceAll(line, \"\\r\", \"\")\n\n\t\t// We found a comment. Transition state to commentStart\n\t\tif strings.HasPrefix(line, \"//\") && state != commentStart {\n\t\t\tstate = commentStart\n\t\t\tstartLine = lineIdx\n\n\t\t} else if strings.Contains(line, \"func\") || strings.Contains(line, \"type\") {\n\n\t\t\t// we found the function keyword so we transition to funcStart state\n\t\t\tif state == commentSearch {\n\t\t\t\t// If we're coming from commentSearch, that means that we didn't have a comment so we set startLine to idx\n\t\t\t\tstartLine = lineIdx\n\n\t\t\t}\n\t\t\t// otherwise, we're coming from commentStart, that means that we had a comment so we leave startLine as it is\n\t\t\tstate = funcStart\n\t\t} else if strings.HasPrefix(line, \"}\") {\n\t\t\tstate = funcEnd\n\t\t\tendLine = lineIdx\n\n\t\t} else if !(strings.HasPrefix(line, \"//\")) && state != funcStart {\n\t\t\tstate = commentSearch\n\t\t\tcomment = \"\"\n\t\t\tstartLine = 0\n\t\t\tendLine = 0\n\n\t\t}\n\n\t\tswitch state {\n\t\tcase commentSearch:\n\t\t\tcontinue\n\t\tcase commentStart:\n\t\t\tcomment += fmt.Sprintf(\"%v\\n\", line)\n\t\tcase funcStart:\n\t\t\tfunctionContent += fmt.Sprintf(\"%v\\n\", line)\n\n\t\tcase funcEnd:\n\t\t\t// add the closing brace\n\t\t\tfunctionContent += fmt.Sprintf(\"%v\\n\", line)\n\t\t\tendLine = uint64(idx)\n\n\t\t\t// create a new function object with the information we got\n\t\t\tf := function.NewFunction(comment, functionContent, \"noNameYet\", 0, startLine, endLine)\n\n\t\t\t// add that to our 
map\n\t\t\tfunctions[uint64(f.FuncID)] = f\n\n\t\t\t// reset our state machine\n\t\t\tstartLine = 0\n\t\t\tcomment = \"\"\n\t\t\tfunctionContent = \"\"\n\t\t\tstate = commentSearch\n\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n\treturn\n}", "func ParseC(code string) (functions map[uint64]*function.Function) {\n\n\tcodeLines := strings.Split(code, \"\\n\")\n\tfunctions = make(map[uint64]*function.Function)\n\n\tvar (\n\t\tstartLine uint64\n\t\tendLine uint64\n\t\tcomment string\n\t\tfunctionContent string\n\t\tstate = commentSearch\n\t)\n\n\tfor idx, line := range codeLines {\n\t\tif strings.HasPrefix(line, \"//\") {\n\t\t\tstate = commentStart\n\t\t} else if cFuncMatch.MatchString(line) ||\n\t\t\t(strings.Contains(line, \"template\") && strings.Contains(line, \"typename\")) {\n\t\t\tif state == commentSearch {\n\t\t\t\t// If we're coming from commentSearch, that means that we didn't have a comment so we set startLine to idx\n\t\t\t\tstartLine = uint64(idx + 1)\n\t\t\t}\n\t\t\tstate = funcStart\n\t\t} else if strings.Contains(line, \"struct\") && strings.Contains(line, \"{\") {\n\t\t\tif state == commentSearch {\n\t\t\t\tstartLine = uint64(idx + 1)\n\t\t\t}\n\t\t\tstate = funcStart\n\t\t} else if strings.HasPrefix(line, \"}\") {\n\t\t\tstate = funcEnd\n\t\t} else if !(strings.HasPrefix(line, \"//\")) && state != funcStart {\n\t\t\tstate = commentSearch\n\t\t\tcomment = \"\"\n\t\t}\n\n\t\tswitch state {\n\t\tcase commentSearch:\n\t\t\tcontinue\n\t\tcase commentStart:\n\t\t\tstartLine = uint64(idx + 1)\n\t\t\tcomment += fmt.Sprintf(\"%s\\n\", line)\n\t\tcase funcStart:\n\t\t\tfunctionContent += fmt.Sprintf(\"%v\\n\", line)\n\t\tcase funcEnd:\n\t\t\tendLine = uint64(idx + 1)\n\t\t\t// add the closing brace\n\t\t\tfunctionContent += fmt.Sprintf(\"%v\\n\", line)\n\t\t\t// create a new function object with the information we got\n\t\t\tf := function.NewFunction(comment, functionContent, \"noNameYet\", 0, startLine, endLine)\n\t\t\t// add that to our 
map\n\t\t\tfunctions[uint64(f.FuncID)] = f\n\t\t\t// reset our state machine\n\t\t\tstartLine = 0\n\t\t\tcomment = \"\"\n\t\t\tfunctionContent = \"\"\n\t\t\tstate = commentSearch\n\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn\n}", "func ParseFunction(s string) (prefix string, funcname string, f []string, r []string, err error) {\n\tdefer func() {\n\t\tif len(f) == 1 && f[0] == \"void\" {\n\t\t\tf = nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"cannot parse function '%s' : %v\", s, err)\n\t\t} else {\n\t\t\tprefix = strings.TrimSpace(prefix)\n\t\t\tfuncname = strings.TrimSpace(funcname)\n\t\t\tfor i := range r {\n\t\t\t\tr[i] = strings.TrimSpace(r[i])\n\t\t\t}\n\t\t\tfor i := range f {\n\t\t\t\tf[i] = strings.TrimSpace(f[i])\n\t\t\t}\n\t\t}\n\t}()\n\n\t// remove specific attribute for function longjmp\n\ts = strings.Replace(s, \"__attribute__((noreturn))\", \"\", -1)\n\n\ts = strings.TrimSpace(s)\n\tif !IsFunction(s) {\n\t\terr = fmt.Errorf(\"is not function : %s\", s)\n\t\treturn\n\t}\n\tvar returns string\n\tvar arguments string\n\t{\n\t\t// Example of function types :\n\t\t// int (*)(int, float)\n\t\t// int (int, float)\n\t\t// int (*)(int (*)(int))\n\t\t// void (*(*)(int *, void *, const char *))(void)\n\t\tif s[len(s)-1] != ')' {\n\t\t\terr = fmt.Errorf(\"function type |%s| haven't last symbol ')'\", s)\n\t\t\treturn\n\t\t}\n\t\tcounter := 1\n\t\tvar pos int\n\t\tfor i := len(s) - 2; i >= 0; i-- {\n\t\t\tif i == 0 {\n\t\t\t\terr = fmt.Errorf(\"don't found '(' in type : %s\", s)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif s[i] == ')' {\n\t\t\t\tcounter++\n\t\t\t}\n\t\t\tif s[i] == '(' {\n\t\t\t\tcounter--\n\t\t\t}\n\t\t\tif counter == 0 {\n\t\t\t\tpos = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// s[:pos] = `speed_t cfgetospeed`\n\t\tif unicode.IsNumber(rune(s[pos-1])) || unicode.IsLetter(rune(s[pos-1])) {\n\t\t\tfor i := pos - 1; i >= 0; i-- {\n\t\t\t\tif s[i] == ' ' {\n\t\t\t\t\tfuncname = s[i+1 : pos]\n\t\t\t\t\treturns = 
strings.TrimSpace(s[:i])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treturns = strings.TrimSpace(s[:pos])\n\t\t}\n\t\targuments = strings.TrimSpace(s[pos:])\n\t}\n\tif arguments == \"\" {\n\t\terr = fmt.Errorf(\"cannot parse (right part is nil) : %v\", s)\n\t\treturn\n\t}\n\t// separate fields of arguments\n\t{\n\t\tpos := 1\n\t\tcounter := 0\n\t\tfor i := 1; i < len(arguments)-1; i++ {\n\t\t\tif arguments[i] == '(' {\n\t\t\t\tcounter++\n\t\t\t}\n\t\t\tif arguments[i] == ')' {\n\t\t\t\tcounter--\n\t\t\t}\n\t\t\tif counter == 0 && arguments[i] == ',' {\n\t\t\t\tf = append(f, strings.TrimSpace(arguments[pos:i]))\n\t\t\t\tpos = i + 1\n\t\t\t}\n\t\t}\n\t\tf = append(f, strings.TrimSpace(arguments[pos:len(arguments)-1]))\n\t}\n\n\t// returns\n\t// Example: __ssize_t\n\tif returns[len(returns)-1] != ')' {\n\t\tr = append(r, returns)\n\t\treturn\n\t}\n\n\t// Example: void ( *(*)(int *, void *, char *))\n\t// ------- --------------------------- return type\n\t// == prefix\n\t// ++++++++++++++++++++++++++++++ block\n\t// return type : void (*)(int *, void *, char *)\n\t// prefix : *\n\t// Find the block\n\tvar counter int\n\tvar position int\n\tfor i := len(returns) - 1; i >= 0; i-- {\n\t\tif returns[i] == ')' {\n\t\t\tcounter++\n\t\t}\n\t\tif returns[i] == '(' {\n\t\t\tcounter--\n\t\t}\n\t\tif counter == 0 {\n\t\t\tposition = i\n\t\t\tbreak\n\t\t}\n\t}\n\tblock := string([]byte(returns[position:]))\n\treturns = returns[:position]\n\n\t// Examples returns:\n\t// int (*)\n\t// char *(*)\n\t// block is : (*)\n\tif block == \"(*)\" {\n\t\tr = append(r, returns)\n\t\treturn\n\t}\n\n\tindex := strings.Index(block, \"(*)\")\n\tif index < 0 {\n\t\tif strings.Count(block, \"(\") == 1 {\n\t\t\t// Examples returns:\n\t\t\t// int ( * [2])\n\t\t\t// ------ return type\n\t\t\t// ====== prefix\n\t\t\t// ++++++++ block\n\t\t\tbBlock := []byte(block)\n\t\t\tfor i := 0; i < len(bBlock); i++ {\n\t\t\t\tswitch bBlock[i] {\n\t\t\t\tcase '(', ')':\n\t\t\t\t\tbBlock[i] = ' 
'\n\t\t\t\t}\n\t\t\t}\n\t\t\tbBlock = bytes.Replace(bBlock, []byte(\"*\"), []byte(\"\"), 1)\n\t\t\tprefix = string(bBlock)\n\t\t\tr = append(r, returns)\n\t\t\treturn\n\t\t}\n\t\t// void (*(int *, void *, const char *))\n\t\t// ++++++++++++++++++++++++++++++++ block\n\t\tblock = block[1 : len(block)-1]\n\t\tindex := strings.Index(block, \"(\")\n\t\tif index < 0 {\n\t\t\terr = fmt.Errorf(\"cannot found '(' in block\")\n\t\t\treturn\n\t\t}\n\t\treturns = returns + block[index:]\n\t\tprefix = block[:index]\n\t\tif strings.Contains(prefix, \"*\") {\n\t\t\tprefix = strings.Replace(prefix, \"*\", \"\", 1)\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"undefined situation\")\n\t\t\treturn\n\t\t}\n\t\tr = append(r, returns)\n\t\treturn\n\t}\n\tif len(block)-1 > index+3 && block[index+3] == '(' {\n\t\t// Examples returns:\n\t\t// void ( *(*)(int *, void *, char *))\n\t\t// ++++++++++++++++++++++++++++++ block\n\t\t// ^^ check this\n\t\tblock = strings.Replace(block, \"(*)\", \"\", 1)\n\t\tblock = block[1 : len(block)-1]\n\t\tindex := strings.Index(block, \"(\")\n\t\tif index < 0 {\n\t\t\terr = fmt.Errorf(\"cannot found '(' in block\")\n\t\t\treturn\n\t\t}\n\n\t\treturns = returns + block[index:]\n\t\t// example of block[:index]\n\t\t// `*signal`\n\t\t// `* signal`\n\t\tif pr := strings.TrimSpace(block[:index]); unicode.IsLetter(rune(pr[len(pr)-1])) ||\n\t\t\tunicode.IsNumber(rune(pr[len(pr)-1])) {\n\t\t\tpr = strings.Replace(pr, \"*\", \" * \", -1)\n\t\t\tfor i := len(pr) - 1; i >= 0; i-- {\n\t\t\t\tif unicode.IsLetter(rune(pr[i])) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif unicode.IsNumber(rune(pr[i])) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tprefix = pr[:i]\n\t\t\t\tfuncname = pr[i:]\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tprefix = block[:index]\n\t\t}\n\n\t\tr = append(r, returns)\n\t\treturn\n\t}\n\n\t// Examples returns:\n\t// int ( *( *(*)))\n\t// ----- return type\n\t// ========= prefix\n\t// +++++++++++ block\n\tbBlock := []byte(block)\n\tfor i := 0; i < 
len(bBlock); i++ {\n\t\tswitch bBlock[i] {\n\t\tcase '(', ')':\n\t\t\tbBlock[i] = ' '\n\t\t}\n\t}\n\tbBlock = bytes.Replace(bBlock, []byte(\"*\"), []byte(\"\"), 1)\n\tprefix = string(bBlock)\n\tr = append(r, returns)\n\n\treturn\n}", "func ParseFunctions(filePath string) *TemplateValues {\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar funcInfos []FunctionInfo\n\tpackageName := fmt.Sprint(f.Name)\n\tcontainsMux := false\n\n\tfor _, decl := range f.Decls {\n\t\tswitch t := decl.(type) {\n\t\tcase *ast.FuncDecl:\n\t\t\tresponseWriterParamExists := false\n\t\t\trequestParamExists := false\n\t\t\tfor _, param := range t.Type.Params.List {\n\t\t\t\tswitch t2 := param.Type.(type) {\n\t\t\t\tcase *ast.SelectorExpr:\n\t\t\t\t\tparamName := fmt.Sprint(t2.Sel.Name)\n\t\t\t\t\tif paramName == \"ResponseWriter\" {\n\t\t\t\t\t\tresponseWriterParamExists = true\n\t\t\t\t\t}\n\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\tparamName := fmt.Sprint(t2.X)\n\t\t\t\t\tif paramName == \"&{http Request}\" {\n\t\t\t\t\t\trequestParamExists = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif responseWriterParamExists && requestParamExists {\n\t\t\t\tmuxVars := getMuxVars(t)\n\t\t\t\tif len(muxVars) > 0 {\n\t\t\t\t\tcontainsMux = true\n\t\t\t\t}\n\t\t\t\tfuncInfo := FunctionInfo{\n\t\t\t\t\tName: fmt.Sprint(t.Name),\n\t\t\t\t\tMuxVars: muxVars,\n\t\t\t\t}\n\t\t\t\tfuncInfos = append(funcInfos, funcInfo)\n\t\t\t}\n\t\t}\n\t}\n\ttemplateValues := TemplateValues{\n\t\tFuncInfo: funcInfos,\n\t\tPackageName: packageName,\n\t\tContainsMux: containsMux,\n\t}\n\treturn &templateValues\n}", "func (p *parser) parseFunction(typ uint8) (expr *tree.FunctionExpr, names tree.FuncNameList) {\n\texpr = &tree.FunctionExpr{}\n\texpr.FuncToken = p.expectToken(token.FUNCTION)\n\tif typ > funcExpr {\n\t\tnames.Items = append(names.Items, p.expectToken(token.NAME))\n\t\tif typ > funcLocal {\n\t\t\tfor p.tok == 
token.DOT {\n\t\t\t\tnames.Seps = append(names.Seps, p.tokenNext())\n\t\t\t\tnames.Items = append(names.Items, p.expectToken(token.NAME))\n\t\t\t}\n\t\t\tif p.tok == token.COLON {\n\t\t\t\tnames.ColonToken = p.tokenNext()\n\t\t\t\tnames.MethodToken = p.expectToken(token.NAME)\n\t\t\t}\n\t\t}\n\t}\n\texpr.LParenToken = p.expectToken(token.LPAREN)\n\tif p.tok == token.NAME {\n\t\texpr.Params = &tree.NameList{Items: []tree.Token{p.expectToken(token.NAME)}}\n\t\tfor p.tok == token.COMMA {\n\t\t\tsepToken := p.tokenNext()\n\t\t\tif p.tok == token.VARARG {\n\t\t\t\texpr.VarArgSepToken = sepToken\n\t\t\t\texpr.VarArgToken = p.tokenNext()\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texpr.Params.Seps = append(expr.Params.Seps, sepToken)\n\t\t\texpr.Params.Items = append(expr.Params.Items, p.expectToken(token.NAME))\n\t\t}\n\t} else if p.tok == token.VARARG {\n\t\texpr.VarArgToken = p.tokenNext()\n\t}\n\texpr.RParenToken = p.expectToken(token.RPAREN)\n\texpr.Body = p.parseBlockBody(token.END)\n\texpr.EndToken = p.expectToken(token.END)\n\treturn expr, names\n}", "func readFunctions(fnsNode confl.Node) (map[string]*Function, error) {\n\tif fnsNode.Type() != confl.MapType {\n\t\treturn nil, errors.New(\"Expected map for Functions section\")\n\t}\n\n\tfunctions := make(map[string]*Function)\n\n\tfor _, pair := range confl.KVPairs(fnsNode) {\n\t\tfnDef, fnErr := readFunction(pair.Key, pair.Value)\n\t\tif fnErr != nil {\n\t\t\treturn nil, fnErr\n\t\t}\n\n\t\tfunctions[fnDef.Name] = fnDef\n\t}\n\n\treturn functions, nil\n}", "func ParseFunctionDefinition(rd io.Reader) (*FunctionDefinition, error) {\n\ts := &scanner.Scanner{Mode: scanner.GoTokens}\n\ts.Init(rd)\n\tstate := StateInit\n\tvar f FunctionDefinition\n\ttokens := make([]string, 0)\n\tfor {\n\t\tr := s.Scan()\n\t\tif r == scanner.EOF {\n\t\t\tbreak\n\t\t}\n\t\tswitch state {\n\t\tcase StateInit:\n\t\t\tswitch r {\n\t\t\tcase '(':\n\t\t\t\tif len(tokens) < 2 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"function definition needs at least a name 
and a type\")\n\t\t\t\t}\n\t\t\t\tf.Name = tokens[0]\n\t\t\t\tvar typ string\n\t\t\t\tfor _, t := range tokens[1:] {\n\t\t\t\t\tswitch t {\n\t\t\t\t\t// ignore WINAPI calling convention\n\t\t\t\t\tcase \"WINAPI\":\n\t\t\t\t\tdefault:\n\t\t\t\t\t\ttyp = t\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tf.Type = translate(typ)\n\t\t\t\tif f.Type == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"did not find translation type for %s\", typ)\n\t\t\t\t}\n\t\t\t\ttokens = tokens[0:0]\n\t\t\t\tstate = StateParam\n\t\t\tcase scanner.Ident:\n\t\t\t\ttokens = append([]string{s.TokenText()}, tokens...)\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"parse error: got %s\", scanner.TokenString(r))\n\t\t\t}\n\t\tcase StateParam:\n\t\t\tswitch r {\n\t\t\tcase scanner.Ident:\n\t\t\t\ttokens = append([]string{s.TokenText()}, tokens...)\n\t\t\tcase ',', ')':\n\t\t\t\tif len(tokens) < 2 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"function parameter needs at least a name and a type\")\n\t\t\t\t}\n\n\t\t\t\tp := FunctionParameterDefinition{Name: tokens[0]}\n\n\t\t\t\tvar typ string\n\t\t\t\tfor _, t := range tokens[1:] {\n\t\t\t\t\tswitch t {\n\t\t\t\t\tcase \"_In_\":\n\t\t\t\t\t\tp.Direction = DirectionIn\n\t\t\t\t\tcase \"_In_opt_\":\n\t\t\t\t\t\tp.Direction = DirectionInOpt\n\t\t\t\t\tcase \"_Out_\":\n\t\t\t\t\t\tp.Direction = DirectionOut\n\t\t\t\t\tcase \"_Out_opt_\":\n\t\t\t\t\t\tp.Direction = DirectionOutOpt\n\t\t\t\t\tcase \"_Inout_\":\n\t\t\t\t\t\tp.Direction = DirectionInOut\n\t\t\t\t\tcase \"_Inout_opt_\":\n\t\t\t\t\t\tp.Direction = DirectionInOutOpt\n\t\t\t\t\tcase \"_Reserved_\":\n\t\t\t\t\t\tp.Direction = DirectionReserved\n\t\t\t\t\tdefault:\n\t\t\t\t\t\ttyp = t\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tp.Type = translate(typ)\n\t\t\t\tif p.Type == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"did not find translation type for %s\", typ)\n\t\t\t\t}\n\t\t\t\tf.Params = append(f.Params, p)\n\t\t\t\ttokens = tokens[0:0]\n\t\t\t\tif r == ')' {\n\t\t\t\t\tstate = StateExit\n\t\t\t\t}\n\t\t\t}\n\t\tcase 
StateExit:\n\t\t\tswitch r {\n\t\t\tcase ';':\n\t\t\t\tstate = StateInit\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif state != StateInit {\n\t\treturn nil, fmt.Errorf(\"parse error: wrong state %d\", state)\n\t}\n\treturn &f, nil\n}", "func (function *function) parse() (err error) {\n\tsignatureFinder := regexp.MustCompile(`(?is)CREATE(?:\\s+OR\\s+REPLACE)?\\s+FUNCTION\\s+(\\S+?)\\((.*?)\\)`)\n\tsubMatches := signatureFinder.FindStringSubmatch(function.definition)\n\n\tif len(subMatches) < 3 {\n\t\treturn fmt.Errorf(\"Can't find a function in %s\", function.path)\n\t}\n\n\tfunction.name = subMatches[1]\n\n\tif function.parseSignature {\n\t\tfunction.signature = subMatches[2]\n\t} else {\n\t\tfunction.signature, function.previousExists, err = function.previousSignature()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = function.removeDefaultFromSignature()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (function *Function) Parse() (err error) {\n\tsignatureFinder := regexp.MustCompile(`(?is)CREATE(?:\\s+OR\\s+REPLACE)?\\s+FUNCTION\\s+(\\S+?)\\((.*?)\\)`)\n\tsubMatches := signatureFinder.FindStringSubmatch(function.Definition)\n\n\tif len(subMatches) < 3 {\n\t\treturn fmt.Errorf(\"Can't find a function in %s\", function.Path)\n\t}\n\n\tfunction.Name = subMatches[1]\n\n\tif function.ParseSignature {\n\t\tfunction.Signature = subMatches[2]\n\t} else {\n\t\tfunction.Signature, function.PreviousExists, err = function.previousSignature()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = function.removeDefaultFromSignature()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func ParseFunction(fset *token.FileSet, fnc *ast.FuncDecl, bb *bytes.Buffer) *Function {\n\tf := &Function{Name: fnc.Name.Name}\n\n\tfnc.Body = nil\n\n\tif err := printer.Fprint(bb, fset, fnc); err != nil {\n\t\treturn nil\n\t}\n\n\tvar startPos int\n\tif fnc.Doc.Text() != \"\" {\n\t\tstartPos = int(fnc.Type.Pos() - fnc.Doc.Pos())\n\t}\n\n\tf.Signature = 
bb.String()[startPos:]\n\tf.Documentation = strings.TrimSpace(fnc.Doc.Text())\n\n\treturn f\n}", "func (v *Function) Decode(sr stream.Reader) error {\n\n\tnameIsSet := false\n\tthriftNameIsSet := false\n\targumentsIsSet := false\n\n\tif err := sr.ReadStructBegin(); err != nil {\n\t\treturn err\n\t}\n\n\tfh, ok, err := sr.ReadFieldBegin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor ok {\n\t\tswitch {\n\t\tcase fh.ID == 1 && fh.Type == wire.TBinary:\n\t\t\tv.Name, err = sr.ReadString()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnameIsSet = true\n\t\tcase fh.ID == 2 && fh.Type == wire.TBinary:\n\t\t\tv.ThriftName, err = sr.ReadString()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tthriftNameIsSet = true\n\t\tcase fh.ID == 3 && fh.Type == wire.TList:\n\t\t\tv.Arguments, err = _List_Argument_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\targumentsIsSet = true\n\t\tcase fh.ID == 4 && fh.Type == wire.TStruct:\n\t\t\tv.ReturnType, err = _Type_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 5 && fh.Type == wire.TList:\n\t\t\tv.Exceptions, err = _List_Argument_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 6 && fh.Type == wire.TBool:\n\t\t\tvar x bool\n\t\t\tx, err = sr.ReadBool()\n\t\t\tv.OneWay = &x\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fh.ID == 7 && fh.Type == wire.TMap:\n\t\t\tv.Annotations, err = _Map_String_String_Decode(sr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tdefault:\n\t\t\tif err := sr.Skip(fh.Type); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := sr.ReadFieldEnd(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fh, ok, err = sr.ReadFieldBegin(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := sr.ReadStructEnd(); err != nil {\n\t\treturn err\n\t}\n\n\tif !nameIsSet {\n\t\treturn errors.New(\"field Name of Function is required\")\n\t}\n\n\tif !thriftNameIsSet 
{\n\t\treturn errors.New(\"field ThriftName of Function is required\")\n\t}\n\n\tif !argumentsIsSet {\n\t\treturn errors.New(\"field Arguments of Function is required\")\n\t}\n\n\treturn nil\n}", "func readFunction(fnKey confl.Node, fnNode confl.Node) (*Function, error) {\n\tname := fnKey.Value()\n\n\tif fnNode.Type() != confl.MapType {\n\t\treturn nil, errors.New(\"Invalid function definition\")\n\t}\n\n\tout := &Function{Name: name}\n\n\tfor _, pair := range confl.KVPairs(fnNode) {\n\t\tswitch pair.Key.Value() {\n\t\tcase \"Package\":\n\t\t\tif !confl.IsText(pair.Value) {\n\t\t\t\treturn nil, errors.New(\"Invalid package\")\n\t\t\t}\n\n\t\t\tout.Package = pair.Value.Value()\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"Invalid key\")\n\t\t}\n\t}\n\n\treturn out, nil\n}", "func (p *Parser) parseFunction(name string) (*Function, error) {\n\tname = strings.ToLower(name)\n\targs := make([]Expr, 0)\n\n\t// If there's a right paren then just return immediately.\n\t// This is the case for functions without arguments\n\tif tok, _, _ := p.scan(); tok == Rparen {\n\t\tfn := &Function{Name: name}\n\t\tif err := fn.validate(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn fn, nil\n\t}\n\tp.unscan()\n\n\targ, err := p.ParseExpr()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs = append(args, arg)\n\n\t// Parse additional function arguments if there is a comma.\n\tfor {\n\t\t// If there's not a comma, stop parsing arguments.\n\t\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != Comma {\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\n\t\t// Parse an expression argument.\n\t\targ, err := p.ParseExpr()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\n\t// There should be a right parentheses at the end.\n\tif tok, pos, lit := p.scan(); tok != Rparen {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\")\"}, pos, p.expr)\n\t}\n\n\tfn := &Function{Name: name, Args: args}\n\n\tif err := fn.validate(); err != nil {\n\t\treturn 
nil, err\n\t}\n\n\treturn fn, nil\n}", "func ParseFnCall(data []byte) {\n\tvar r FnCall\n\terr := json.Unmarshal(data, &r)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\t// log.Println(r.Function)\n\t// log.Println(r.Data)\n\tFunctionCaller(r)\n}", "func Parse(tokens *list.List, funcDefs map[string]int) (ParseTreeRoot, error) {\r\n\r\n\ttoken := tokens.Front()\r\n\ttree := ParseTreeRoot{make([]ParseTree, 0)}\r\n\r\n\tfor token != nil {\r\n\t\tif tokenID(token) != TokenIdentifier {\r\n\t\t\treturn tree, fmt.Errorf(\"\\\"unit\\\", \\\"assembly\\\", \\\"enum\\\", \\\"summarize\\\", or \\\"solve\\\" expected but \\\"%s\\\" given at position %d\", tokenContent(token), tokenPos(token))\r\n\t\t}\r\n\r\n\t\tswitch tokenContent(token) {\r\n\t\tcase \"unit\":\r\n\t\t\t_token, unit, err := parseUnit(token.Next(), tree, funcDefs)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddUnit(unit)\r\n\t\t\tbreak\r\n\t\tcase \"enum\":\r\n\t\t\t_token, enum, err := parseEnum(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddEnum(enum)\r\n\t\t\tbreak\r\n\t\tcase \"assembly\":\r\n\t\t\t_token, assembly, err := parseAssembly(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddAssembly(assembly)\r\n\t\t\tbreak\r\n\t\tcase \"summarize\":\r\n\t\t\t_token, summarize, err := parseSummarize(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddSummarize(summarize)\r\n\t\t\tbreak\r\n\t\tcase \"solve\":\r\n\t\t\t_token, solve, err := parseSolve(token.Next(), tree)\r\n\t\t\ttoken = _token\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn tree, err\r\n\t\t\t}\r\n\t\t\ttree.AddSolve(solve)\r\n\t\t\tbreak\r\n\t\t}\r\n\t\ttoken = token.Next()\r\n\t}\r\n\r\n\treturn tree, nil\r\n\r\n}", "func loadFunctions() (err error) 
{\n\tsuccessfulCount := len(conf.functionFiles)\n\terrors := make([]string, 0)\n\tbypass := make(map[string]bool)\n\n\tfiles, err := resolveDependencies(conf.functionFiles, conf.sqlDirPath+\"functions\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfunctions := make([]*function, 0)\n\tfor i := len(files) - 1; i >= 0; i-- {\n\t\tfile := files[i]\n\t\tf := function{}\n\t\tf.path = file\n\t\tfunctions = append(functions, &f)\n\n\t\terr = downPass(&f, f.path)\n\t\tif err != nil {\n\t\t\tsuccessfulCount--\n\t\t\terrors = append(errors, fmt.Sprintf(\"%v\\n\", err))\n\t\t\tbypass[f.path] = true\n\t\t}\n\t}\n\n\tfor i := len(functions) - 1; i >= 0; i-- {\n\t\tf := functions[i]\n\t\tif _, ignore := bypass[f.path]; !ignore {\n\t\t\terr = upPass(f, f.path)\n\t\t\tif err != nil {\n\t\t\t\tsuccessfulCount--\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"%v\\n\", err))\n\t\t\t}\n\t\t}\n\t}\n\n\treport(\"functions\", successfulCount, len(conf.functionFiles), errors)\n\n\treturn\n}", "func processFunction(fn *ast.FuncDecl, fset *token.FileSet, rules *[]*Rule) {\n\tvar rule *Rule\n\tvar code []ast.Stmt\n\tfor _, stmt := range fn.Body.List {\n\t\tif match, patternStr := isSyntaxCall(stmt); match {\n\t\t\tif rule != nil {\n\t\t\t\trule.code = astStr(fset, code)\n\t\t\t\t*rules = append(*rules, rule)\n\t\t\t}\n\n\t\t\tpattern, vars := parsePattern(patternStr)\n\t\t\trule = &Rule{\n\t\t\t\tsymbol: fn.Name.Name,\n\t\t\t\ttyp: astStr(fset, fn.Type.Results.List[0].Type),\n\t\t\t\tpattern: pattern,\n\t\t\t\tvars: vars,\n\t\t\t}\n\t\t\tcode = nil\n\t\t} else {\n\t\t\tcode = append(code, stmt)\n\t\t}\n\t}\n\n\tif rule != nil {\n\t\trule.code = astStr(fset, code)\n\t\t*rules = append(*rules, rule)\n\t}\n\treturn\n}", "func parseFuncDefs(filename string) ([]string, error) {\n\tf, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar funcNames []string\n\tast.Walk(visitFn(func(n ast.Node) bool {\n\t\tswitch n := n.(type) {\n\t\tcase 
*ast.CompositeLit:\n\t\t\tif isFuncMap(n.Type) {\n\t\t\t\tfor _, e := range n.Elts {\n\t\t\t\t\tkv := e.(*ast.KeyValueExpr)\n\t\t\t\t\tname, err := strconv.Unquote(kv.Key.(*ast.BasicLit).Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tfuncNames = append(funcNames, name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}), f)\n\treturn funcNames, nil\n}", "func (p Program) lineToFunction(line string) Function {\n\tif len(line) < 3 {\n\t\treturn Function{}\n\t}\n\n\t// divide the function and the parameter\n\tdivided := strings.Split(line, \"(\")\n\n\t// name of the function at the position 0\n\tfunctionName := divided[0]\n\n\t// handle and split parameter\n\tparams := strings.Split(divided[1], \",\")\n\n\tparameter := []interface{}{}\n\tfor _, param := range params {\n\n\t\t// check length of the parameter\n\t\tif len(param) < 1 {\n\t\t\tfmt.Println(\"drawlab: null parameter found at the function\", functionName)\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\t// remove all whitespace and closed bracket\n\n\t\tparam = strings.ReplaceAll(param, \")\", \"\")\n\t\tparam = strings.ReplaceAll(param, \"\\n\", \"\")\n\n\t\t// check if first byte is a space\n\t\ttempParam := param\n\t\tif param[0] == ' ' {\n\t\t\ttempParam = param[1:]\n\t\t}\n\n\t\t// check if number\n\t\tn, err := strconv.Atoi(tempParam)\n\t\tif err != nil {\n\n\t\t\t// check if it's text for the text function\n\t\t\tif tempParam[0] == '\"' {\n\t\t\t\t// remove quotes\n\t\t\t\tparameter = append(parameter, tempParam[1:len(tempParam)-1])\n\n\t\t\t\t// check if it's variable\n\t\t\t} else if tempParam[0] == '#' {\n\n\t\t\t\t// remove '#'\n\t\t\t\tval := p.getVariable(tempParam[1:])\n\t\t\t\tparameter = append(parameter, val)\n\t\t\t}\n\t\t} else {\n\t\t\tparameter = append(parameter, n)\n\t\t}\n\t}\n\n\treturn Function{\n\t\tName: functionName,\n\t\tParameters: parameter,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parseDependencies ... The term "dependency" is used here to refer to any data type that may require an include or forward declare.
func (i *Interface) parseDependencies() { var dependencies []string for _, function := range i.Functions { // "expanded" refers to creating a parsers.from a templated type, i.e "QMap <int, QString>" becomes [QMap int QString] expandedReturnType := strings.FieldsFunc(function.ReturnType, templatedTypeSeparators) for _, dataType := range(expandedReturnType) { dependencies = append(dependencies, strings.TrimSpace(dataType)) } for _, parameter := range function.Parameters { expandedParameter := strings.FieldsFunc(parameter.Type, templatedTypeSeparators) for _, innerParameter := range expandedParameter { dependencies = append(dependencies, strings.TrimSpace(innerParameter)) } } } i.Dependencies = dependencies i.Dependencies = parsers.RemoveConstSpecifiers(i.Dependencies) i.Dependencies = parsers.RemovePointersAndReferences(i.Dependencies) i.Dependencies = parsers.RemoveStdDataTypes(i.Dependencies) i.Dependencies = parsers.MapDataTypesToLibraryDependencies(i.Dependencies) i.Dependencies = parsers.RemoveDuplicates(i.Dependencies) sort.Strings(i.Dependencies) }
[ "func ExtractDependenciesList(depends string) ([]*Dependency, error) {\n\tdeps := []*Dependency{}\n\tdepends = strings.TrimSpace(depends)\n\tif depends == \"\" {\n\t\treturn deps, nil\n\t}\n\tfor _, dep := range strings.Split(depends, \",\") {\n\t\tdep = strings.TrimSpace(dep)\n\t\tif dep == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"invalid dep: %s\", dep)\n\t\t}\n\t\tmatches := re.FindAllStringSubmatch(dep, -1)\n\t\tif matches == nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid dep: %s\", dep)\n\t\t}\n\t\tdeps = append(deps, &Dependency{\n\t\t\tName: matches[0][1],\n\t\t\tVersion: matches[0][2],\n\t\t})\n\t}\n\treturn deps, nil\n}", "func (instance *Feature) Dependencies(_ context.Context) (map[string]struct{}, fail.Error) {\n\tif valid.IsNil(instance) {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\n\treturn instance.file.getDependencies(), nil\n}", "func (s DhcpServer) Dependencies() (deps []depgraph.Dependency) {\n\treturn []depgraph.Dependency{\n\t\t{\n\t\t\tRequiredItem: depgraph.ItemRef{\n\t\t\t\tItemType: NetNamespaceTypename,\n\t\t\t\tItemName: normNetNsName(s.NetNamespace),\n\t\t\t},\n\t\t\tDescription: \"Network namespace must exist\",\n\t\t},\n\t\t{\n\t\t\tRequiredItem: depgraph.ItemRef{\n\t\t\t\tItemType: VethTypename,\n\t\t\t\tItemName: s.VethName,\n\t\t\t},\n\t\t\tDescription: \"veth interface must exist\",\n\t\t},\n\t}\n}", "func convertDependencies(deps []string) []*license_metadata_proto.AnnotatedDependency {\n\tvar ret []*license_metadata_proto.AnnotatedDependency\n\n\tfor _, d := range deps {\n\t\tcomponents := strings.Split(d, \":\")\n\t\tdep := components[0]\n\t\tcomponents = components[1:]\n\t\tad := &license_metadata_proto.AnnotatedDependency{\n\t\t\tFile: proto.String(dep),\n\t\t\tAnnotations: make([]string, 0, len(components)),\n\t\t}\n\t\tfor _, ann := range components {\n\t\t\tif len(ann) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tad.Annotations = append(ad.Annotations, ann)\n\t\t}\n\t\tret = append(ret, ad)\n\t}\n\n\treturn ret\n}", 
"func (i *Interface) parseForwardDeclares() {\n\tfor _, dependency := range i.Dependencies {\n\t\tif !parsers.ShouldBeIncludedInHeader(dependency) {\n\t\t\ti.ForwardDeclaresString += \"class \" + dependency + \";\\n\"\n\t\t} \n\t}\n}", "func Parse(description string) (deps []Dep) {\n\tfor _, footerValue := range footer.ParseMessage(description)[cqDependKey] {\n\t\tfor _, v := range strings.Split(footerValue, \",\") {\n\t\t\tif dep, err := parseSingleDep(v); err == nil {\n\t\t\t\tdeps = append(deps, dep)\n\t\t\t}\n\t\t}\n\t}\n\tif len(deps) <= 1 {\n\t\treturn deps\n\t}\n\tsort.Slice(deps, func(i, j int) bool { return deps[i].cmp(deps[j]) == 1 })\n\t// Remove duplicates. We don't use the map in the first place, because\n\t// duplicates are highly unlikely in practice and sorting is nice for\n\t// determinism.\n\tl := 0\n\tfor i := 1; i < len(deps); i++ {\n\t\tif d := deps[i]; d.cmp(deps[l]) != 0 {\n\t\t\tl += 1\n\t\t\tdeps[l] = d\n\t\t}\n\t}\n\treturn deps[:l+1]\n}", "func Dependencies(g *Graph) (pacman.Packages, aur.Packages, []string) {\n\trps := make(pacman.Packages, 0)\n\taps := make(aur.Packages, 0)\n\tups := make([]string, 0)\n\n\tnames := make(map[string]bool)\n\tnodes := AllNodesBottomUp(g)\n\tfor _, vn := range nodes {\n\t\tn := vn.(*Node)\n\t\tif names[n.PkgName()] {\n\t\t\tcontinue\n\t\t}\n\n\t\tnames[n.PkgName()] = true\n\t\tswitch p := n.AnyPackage.(type) {\n\t\tcase *aur.Package:\n\t\t\taps = append(aps, p)\n\t\tcase *pacman.Package:\n\t\t\tif p.Origin == pacman.UnknownOrigin {\n\t\t\t\tups = append(ups, p.Name)\n\t\t\t} else {\n\t\t\t\trps = append(rps, p)\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"unexpected type of package in graph\")\n\t\t}\n\t}\n\treturn rps, aps, ups\n}", "func (d *ABFToInterfaceDescriptor) Dependencies(key string, emptyVal proto.Message) []api.Dependency {\n\t_, ifName, _ := vpp_abf.ParseToInterfaceKey(key)\n\treturn []api.Dependency{\n\t\t{\n\t\t\tLabel: interfaceDep,\n\t\t\tKey: vpp_interfaces.InterfaceKey(ifName),\n\t\t},\n\t}\n}", 
"func (line Line) isDependency() bool {\n\tfor k := range dependencyMap {\n\t\tif strings.HasPrefix(line.Last, k) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func loadDependencies(chartPath string, f *Filter) (*chartutil.Requirements, error) {\n\tc, err := chartutil.Load(chartPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treqs, err := chartutil.LoadRequirements(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar deps []*chartutil.Dependency\n\tfor _, d := range reqs.Dependencies {\n\t\tif strings.Contains(d.Repository, filePrefix) {\n\t\t\td.Repository = fmt.Sprintf(\"%s%s\", filePrefix, filepath.Join(chartPath, strings.TrimPrefix(d.Repository, filePrefix)))\n\t\t}\n\t\tdeps = append(deps, d)\n\t}\n\n\treqs.Dependencies = f.FilterDependencies(deps)\n\treturn reqs, nil\n}", "func ParseDeps(fileName string) []string {\n\tfset := token.NewFileSet()\n\n\tf, err := parser.ParseFile(fset, fileName, nil, parser.ImportsOnly)\n\tCheck(err)\n\n\tdepsArray := make([]string, len(f.Imports))\n\tfor index, s := range f.Imports {\n\t\tdepName := strings.Replace(s.Path.Value, string('\"'), \" \", 2)\n\t\tdepName = strings.Replace(depName, \" \", \"\", 10)\n\t\tdepsArray[index] = depName\n\t}\n\n\treturn depsArray\n}", "func buildDependencies(fdSet *dpb.FileDescriptorSet) {\n\t// Dependency to google/api/annotations.proto for gRPC-HTTP transcoding. Here a couple of problems arise:\n\t// 1. Problem: \tWe cannot call descriptor.ForMessage(&annotations.E_Http), which would be our\n\t//\t\t\t\trequired dependency. However, we can call descriptor.ForMessage(&http) and\n\t//\t\t\t\tthen construct the extension manually.\n\t// 2. Problem: \tThe name is set wrong.\n\t// 3. 
Problem: \tgoogle/api/annotations.proto has a dependency to google/protobuf/descriptor.proto.\n\thttp := annotations.Http{}\n\tfd, _ := descriptor.MessageDescriptorProto(&http)\n\n\textensionName := \"http\"\n\tn := \"google/api/annotations.proto\"\n\tl := dpb.FieldDescriptorProto_LABEL_OPTIONAL\n\tt := dpb.FieldDescriptorProto_TYPE_MESSAGE\n\ttName := \"google.api.HttpRule\"\n\textendee := \".google.protobuf.MethodOptions\"\n\n\thttpExtension := &dpb.FieldDescriptorProto{\n\t\tName: &extensionName,\n\t\tNumber: &annotations.E_Http.Field,\n\t\tLabel: &l,\n\t\tType: &t,\n\t\tTypeName: &tName,\n\t\tExtendee: &extendee,\n\t}\n\n\tfd.Extension = append(fd.Extension, httpExtension) // 1. Problem\n\tfd.Name = &n // 2. Problem\n\tfd.Dependency = append(fd.Dependency, \"google/protobuf/descriptor.proto\") //3.rd Problem\n\n\t// Build other required dependencies\n\te := empty.Empty{}\n\tfdp := dpb.DescriptorProto{}\n\tfd2, _ := descriptor.MessageDescriptorProto(&e)\n\tfd3, _ := descriptor.MessageDescriptorProto(&fdp)\n\tdependencies := []*dpb.FileDescriptorProto{fd, fd2, fd3}\n\n\t// According to the documentation of protoReflect.CreateFileDescriptorFromSet the file I want to print\n\t// needs to be at the end of the array. All other FileDescriptorProto are dependencies.\n\tfdSet.File = append(dependencies, fdSet.File...)\n}", "func (lp *layerProcessor) processDependencies(manifest *schema.ImageManifest) error {\n\tfor _, dep := range manifest.Dependencies {\n\t\tdephash, depmanifest := lp.findImageDependency(dep)\n\n\t\t// check the image\n\t\tif depmanifest == nil {\n\t\t\tversion, _ := dep.Labels.Get(\"version\")\n\t\t\treturn fmt.Errorf(\"failed to locate dependent image %s:%s\", dep.ImageName.String(), version)\n\t\t}\n\n\t\t// Validate that it already hasn't showed up in resolution. 
If it has, skip it.\n\t\tif lp.refs[dephash] {\n\t\t\tcontinue\n\t\t}\n\n\t\t// add it to the layers and walk its dependendencies\n\t\tlp.layers = append(lp.layers, dephash)\n\t\tlp.refs[dephash] = true\n\t\tif err := lp.processDependencies(depmanifest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (*serverModule) Dependencies() []module.Dependency {\n\treturn nil\n}", "func (r SrcIPRule) Dependencies() (deps []depgraph.Dependency) {\n\treturn []depgraph.Dependency{\n\t\t{\n\t\t\tRequiredItem: depgraph.ItemRef{\n\t\t\t\tItemType: genericitems.AdapterTypename,\n\t\t\t\tItemName: r.AdapterIfName,\n\t\t\t},\n\t\t\tDescription: \"Not strictly necessary\",\n\t\t},\n\t}\n}", "func ParseDepFile(content []byte) ([]string, []string) {\n\tcontent = bytes.Replace(content, []byte(\"\\\\\\n\"), nil, -1)\n\tcomponents := bytes.Split(content, []byte(\":\"))\n\tif len(components) != 2 {\n\t\treturn nil, nil\n\t}\n\n\ttargetStrs := bytes.Split(components[0], []byte(\" \"))\n\tdepStrs := bytes.Split(components[1], []byte(\" \"))\n\n\tvar targets, deps []string\n\tfor _, t := range targetStrs {\n\t\tif len(t) > 0 {\n\t\t\ttargets = append(targets, string(t))\n\t\t}\n\t}\n\tfor _, d := range depStrs {\n\t\tif len(d) > 0 {\n\t\t\tdeps = append(deps, string(d))\n\t\t}\n\t}\n\n\treturn targets, deps\n}", "func (r *Rule) Dependencies() []label.Label {\n\treturn r.deps\n}", "func parseNpmDependenciesList(dependencies map[string]*npmutils.Dependency, packageInfo *npmutils.PackageInfo) (xrDependencyTree *services.GraphNode) {\n\ttreeMap := make(map[string][]string)\n\tfor dependencyId, dependency := range dependencies {\n\t\tdependencyId = npmPackageTypeIdentifier + dependencyId\n\t\tparent := npmPackageTypeIdentifier + dependency.GetPathToRoot()[0][0]\n\t\tif children, ok := treeMap[parent]; ok {\n\t\t\ttreeMap[parent] = append(children, dependencyId)\n\t\t} else {\n\t\t\ttreeMap[parent] = []string{dependencyId}\n\t\t}\n\t}\n\treturn buildXrayDependencyTree(treeMap, 
npmPackageTypeIdentifier+packageInfo.BuildInfoModuleId())\n}", "func addDependencies(fdSet *dpb.FileDescriptorSet) {\n\t// At last, we need to add the dependencies to the FileDescriptorProto in order to get them rendered.\n\tlastFdProto := getLast(fdSet.File)\n\tfor _, fd := range fdSet.File {\n\t\tif fd != lastFdProto {\n\t\t\tif *fd.Name == \"google/protobuf/empty.proto\" { // Reference: https://github.com/googleapis/gnostic-grpc/issues/8\n\t\t\t\tif shouldRenderEmptyImport {\n\t\t\t\t\tlastFdProto.Dependency = append(lastFdProto.Dependency, *fd.Name)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlastFdProto.Dependency = append(lastFdProto.Dependency, *fd.Name)\n\t\t}\n\t}\n\t// Sort imports so they will be rendered in a consistent order.\n\tsort.Strings(lastFdProto.Dependency)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parseIncludes .. Parses dependencies to create an include string for each.
func (i *Interface) parseIncludes() { for _, dependency := range i.Dependencies { include := NewInclude(dependency) if parsers.ShouldBeIncludedInHeader(dependency) { i.HeaderIncludesString += include.ToString() + "\n" } else { i.ImplementationIncludesString += include.ToString() + "\n" } } }
[ "func processIncludes(source string) string {\n\tlines := strings.Split(source, \"\\n\")\n\tvar result []string\n\tfor _, line := range lines {\n\t\ttrimmed := strings.TrimSpace(line)\n\t\tif url := parseIncludeURL(trimmed); url != \"\" {\n\t\t\tif buf, err := curl(url); err == nil {\n\t\t\t\tresult = append(result, string(buf))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, line)\n\t}\n\n\treturn strings.Join(result, \"\\n\")\n}", "func (c *Converter) ProcessIncludes() ConverterExpansions {\n\tif c.err != nil {\n\t\treturn c\n\t}\n\n\tf := func(node ast.Node, visitType int) error {\n\t\tswitch n := node.(type) {\n\t\tcase *nast.IncludeDirective:\n\t\t\treturn c.convertInclude(n)\n\t\t}\n\t\treturn nil\n\t}\n\tc.err = c.prog.Accept(ast.VisitorFunc(f))\n\treturn c\n}", "func parseInclude(node *node32) string {\n\tstrNode := nextNode(node, ruleSTRLITER)\n\tfile := nextNode(strNode.up, ruleSTR).match\n\n\treturn file\n}", "func (p *Processor) parseIncludesResursive(wd string, name string, includes map[string]*ast.API) parse.ErrorList {\n\tpath, err := filepath.Abs(filepath.Join(wd, name))\n\tif err != nil {\n\t\treturn parse.ErrorList{parse.Error{Message: err.Error()}}\n\t}\n\tif _, seen := includes[path]; seen {\n\t\treturn nil\n\t}\n\tapi, allErrs := p.Parse(path)\n\tif api == nil {\n\t\treturn allErrs\n\t}\n\tincludes[path] = api\n\tfor _, i := range api.Imports {\n\t\tif i.Name != nil {\n\t\t\t// named imports don't get merged\n\t\t\tcontinue\n\t\t}\n\t\tpath := filepath.Join(wd, i.Path.Value)\n\t\twd, name := filepath.Split(path)\n\t\tif errs := p.parseIncludesResursive(wd, name, includes); len(errs) > 0 {\n\t\t\tallErrs = append(allErrs, errs...)\n\t\t}\n\t}\n\treturn allErrs\n}", "func (ctx *context) hoistIncludes(result []byte) []byte {\n\tincludesStart := bytes.Index(result, []byte(\"#include\"))\n\tif includesStart == -1 {\n\t\treturn result\n\t}\n\tincludes := make(map[string]bool)\n\tincludeRe := regexp.MustCompile(\"#include 
<.*>\\n\")\n\tfor _, match := range includeRe.FindAll(result, -1) {\n\t\tincludes[string(match)] = true\n\t}\n\tresult = includeRe.ReplaceAll(result, nil)\n\t// Certain linux and bsd headers are broken and go to the bottom.\n\tvar sorted, sortedBottom, sortedTop []string\n\tfor include := range includes {\n\t\tif strings.Contains(include, \"<linux/\") {\n\t\t\tsortedBottom = append(sortedBottom, include)\n\t\t} else if strings.Contains(include, \"<netinet/if_ether.h>\") {\n\t\t\tsortedBottom = append(sortedBottom, include)\n\t\t} else if strings.Contains(include, \"<keyutils.h>\") {\n\t\t\tsortedBottom = append(sortedBottom, include)\n\t\t} else if ctx.target.OS == freebsd && strings.Contains(include, \"<sys/types.h>\") {\n\t\t\tsortedTop = append(sortedTop, include)\n\t\t} else {\n\t\t\tsorted = append(sorted, include)\n\t\t}\n\t}\n\tsort.Strings(sortedTop)\n\tsort.Strings(sorted)\n\tsort.Strings(sortedBottom)\n\tnewResult := append([]byte{}, result[:includesStart]...)\n\tnewResult = append(newResult, strings.Join(sortedTop, \"\")...)\n\tnewResult = append(newResult, '\\n')\n\tnewResult = append(newResult, strings.Join(sorted, \"\")...)\n\tnewResult = append(newResult, '\\n')\n\tnewResult = append(newResult, strings.Join(sortedBottom, \"\")...)\n\tnewResult = append(newResult, result[includesStart:]...)\n\treturn newResult\n}", "func lexInclude(lx *lexer) stateFn {\r\n\tr := lx.next()\r\n\tswitch {\r\n\tcase r == sqStringStart:\r\n\t\tlx.ignore() // ignore the \" or '\r\n\t\treturn lexIncludeQuotedString\r\n\tcase r == dqStringStart:\r\n\t\tlx.ignore() // ignore the \" or '\r\n\t\treturn lexIncludeDubQuotedString\r\n\tcase r == arrayStart:\r\n\t\treturn lx.errorf(\"Expected include value but found start of an array\")\r\n\tcase r == mapStart:\r\n\t\treturn lx.errorf(\"Expected include value but found start of a map\")\r\n\tcase r == blockStart:\r\n\t\treturn lx.errorf(\"Expected include value but found start of a block\")\r\n\tcase unicode.IsDigit(r), r == 
'-':\r\n\t\treturn lx.errorf(\"Expected include value but found start of a number\")\r\n\tcase r == '\\\\':\r\n\t\treturn lx.errorf(\"Expected include value but found escape sequence\")\r\n\tcase isNL(r):\r\n\t\treturn lx.errorf(\"Expected include value but found new line\")\r\n\t}\r\n\tlx.backup()\r\n\treturn lexIncludeString\r\n}", "func (i *Interface) parseForwardDeclares() {\n\tfor _, dependency := range i.Dependencies {\n\t\tif !parsers.ShouldBeIncludedInHeader(dependency) {\n\t\t\ti.ForwardDeclaresString += \"class \" + dependency + \";\\n\"\n\t\t} \n\t}\n}", "func Include(inp string) string {\n\tif match := Submatch(inp, `(?i)^\\.include\\s+\"?([^\\n\\r\"]*)\"?$`); match != nil {\n\t\treturn match[1]\n\t}\n\treturn \"\"\n}", "func (s *Service) ReferencedIncludes() ([]*Include, error) {\n\tvar err error\n\tincludes := []*Include{}\n\tincludesSet := make(map[string]*Include)\n\n\t// Check extended service.\n\tif s.Extends != \"\" && strings.Contains(s.Extends, \".\") {\n\t\tincludeName := s.Extends[0:strings.Index(s.Extends, \".\")]\n\t\tinclude := s.Frugal.Include(includeName)\n\t\tif include == nil {\n\t\t\treturn nil, fmt.Errorf(\"Service %s extends references invalid include %s\",\n\t\t\t\ts.Name, s.Extends)\n\t\t}\n\t\tif _, ok := includesSet[includeName]; !ok {\n\t\t\tincludesSet[includeName] = include\n\t\t\tincludes = append(includes, include)\n\t\t}\n\t}\n\n\t// Check methods.\n\tfor _, method := range s.Methods {\n\t\t// Check arguments.\n\t\tfor _, arg := range method.Arguments {\n\t\t\tincludesSet, includes, err = addInclude(includesSet, includes, arg.Type, s.Frugal)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\t// Check return type.\n\t\tif method.ReturnType != nil {\n\t\t\tincludesSet, includes, err = addInclude(includesSet, includes, method.ReturnType, s.Frugal)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Check exceptions.\n\t\tfor _, exception := range method.Exceptions {\n\t\t\tincludesSet, 
includes, err = addInclude(includesSet, includes, exception.Type, s.Frugal)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn includes, nil\n}", "func IncludeFactory(p *core.Parser, config *core.Configuration) (core.Tag, error) {\n\tincludeName, err := p.ReadValue()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif includeName == nil {\n\t\treturn nil, p.Error(\"Invalid include value\")\n\t}\n\n\tvar scopeType string\n\tvar scopeKey string\n\tvar scope core.Value\n\tparams := make(map[string]core.Value)\n\n\tvar next = p.SkipSpaces()\n\tif next == 'w' || next == 'f' {\n\t\tscopeType = p.ReadName()\n\n\t\tif scopeType == includeWithScopeType || scopeType == includeForScopeType {\n\t\t\tincludeNameString := core.ToString(includeName.Resolve(nil))\n\t\t\tscopeKey = strings.TrimSuffix(includeNameString, filepath.Ext(includeNameString))\n\n\t\t\tscope, err = p.ReadValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tnext = p.SkipSpaces()\n\t}\n\n\tif next == ',' {\n\t\tp.Forward()\n\n\t\tfor name := p.ReadName(); name != \"\"; name = p.ReadName() {\n\t\t\tif p.SkipSpaces() == ':' {\n\t\t\t\tp.Forward()\n\n\t\t\t\tparams[name], err = p.ReadValue()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif p.SkipSpaces() == ',' {\n\t\t\t\tp.Forward()\n\t\t\t}\n\t\t}\n\t}\n\n\tp.SkipPastTag()\n\n\treturn &Include{\n\t\tincludeName: includeName,\n\t\thandler: config.GetIncludeHandler(),\n\t\tscopeType: scopeType,\n\t\tscopeKey: scopeKey,\n\t\tscope: scope,\n\t\tparameters: params,\n\t}, nil\n}", "func (f *Filter) Include() []string { return patternsToStrings(f.include...) 
}", "func parseCompressIncludes(includes []string) ([]string, error) {\n\tfor _, e := range includes {\n\t\tif len(e) == 0 {\n\t\t\treturn nil, config.ErrInvalidCompressionIncludesValue(nil).Msg(\"extension/mime-type (%s) cannot be empty\", e)\n\t\t}\n\t}\n\treturn includes, nil\n}", "func (f *Frugal) OrderedIncludes() []*Frugal {\n\tkeys := make([]string, 0, len(f.ParsedIncludes))\n\tfor key := range f.ParsedIncludes {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\tincludes := make([]*Frugal, 0, len(f.ParsedIncludes))\n\tfor _, key := range keys {\n\t\tincludes = append(includes, f.ParsedIncludes[key])\n\t}\n\treturn includes\n}", "func (i *Interface) parseDependencies() {\n\tvar dependencies []string\n\tfor _, function := range i.Functions {\n\n\t\t// \"expanded\" refers to creating a parsers.from a templated type, i.e \"QMap <int, QString>\" becomes [QMap int QString]\n\t\texpandedReturnType := strings.FieldsFunc(function.ReturnType, templatedTypeSeparators) \n\t\tfor _, dataType := range(expandedReturnType) {\n\t\t\tdependencies = append(dependencies, strings.TrimSpace(dataType))\n\t\t}\n\n\t\tfor _, parameter := range function.Parameters {\n\t\t\texpandedParameter := strings.FieldsFunc(parameter.Type, templatedTypeSeparators)\n\t\t\tfor _, innerParameter := range expandedParameter {\n\t\t\t\tdependencies = append(dependencies, strings.TrimSpace(innerParameter))\n\t\t\t} \n\t\t}\n\t}\n\ti.Dependencies = dependencies\n\ti.Dependencies = parsers.RemoveConstSpecifiers(i.Dependencies)\n\ti.Dependencies = parsers.RemovePointersAndReferences(i.Dependencies)\n\ti.Dependencies = parsers.RemoveStdDataTypes(i.Dependencies)\n\ti.Dependencies = parsers.MapDataTypesToLibraryDependencies(i.Dependencies)\n\ti.Dependencies = parsers.RemoveDuplicates(i.Dependencies)\n\tsort.Strings(i.Dependencies)\n}", "func (o *object) includes() io.Reader {\n\tsort.Strings(o.inc)\n\treturn strings.NewReader(strings.Join(o.inc, \"\\n\"))\n}", "func lexIncludeString(lx *lexer) 
stateFn {\r\n\tr := lx.next()\r\n\tswitch {\r\n\tcase isNL(r) || r == eof || r == optValTerm || r == mapEnd || isWhitespace(r):\r\n\t\tlx.backup()\r\n\t\tlx.emit(itemInclude)\r\n\t\treturn lx.pop()\r\n\tcase r == sqStringEnd:\r\n\t\tlx.backup()\r\n\t\tlx.emit(itemInclude)\r\n\t\tlx.next()\r\n\t\tlx.ignore()\r\n\t\treturn lx.pop()\r\n\t}\r\n\treturn lexIncludeString\r\n}", "func addInclude(includesSet map[string]*Include, includes []*Include, t *Type, frugal *Frugal) (map[string]*Include, []*Include, error) {\n\tvar err error\n\tif strings.Contains(t.Name, \".\") {\n\t\tincludeName := t.Name[0:strings.Index(t.Name, \".\")]\n\t\tinclude := frugal.Include(includeName)\n\t\tif include == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"Type %s references invalid include %s\", t.Name, include.Name)\n\t\t}\n\t\tif _, ok := includesSet[includeName]; !ok {\n\t\t\tincludesSet[includeName] = include\n\t\t\tincludes = append(includes, include)\n\t\t}\n\t}\n\t// Check container types.\n\tif t.KeyType != nil {\n\t\tincludesSet, includes, err = addInclude(includesSet, includes, t.KeyType, frugal)\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif t.ValueType != nil {\n\t\tincludesSet, includes, err = addInclude(includesSet, includes, t.ValueType, frugal)\n\t}\n\treturn includesSet, includes, err\n}", "func (ts *TemplateSet) joinIncludes(t *ttpl.Template) error {\n\tvar err error\n\tfor _, basePath := range ts.baseSearchPaths {\n\t\tfor _, includePath := range ts.includePaths {\n\t\t\ttplPath := filepath.Join(basePath, includePath)\n\t\t\tif !ackutil.FileExists(tplPath) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif t, err = includeTemplate(t, tplPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func appendIncludedFiles(ast *AST, ifm *IncludeFiles) {\n\tfor _, include := range ast.includes {\n\t\tabsoluteFile := fmt.Sprintf(\"%v/%v\", ifm.dir,\n\t\t\tinclude)\n\n\t\t_, included := ifm.files[absoluteFile]\n\t\tif included 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tifm.Include(absoluteFile)\n\n\t\twaccIncl := parseInput(absoluteFile)\n\t\tastIncl := generateASTFromWACC(waccIncl, ifm)\n\n\t\tast.enums = append(ast.enums,\n\t\t\tastIncl.enums...)\n\n\t\tast.classes = append(ast.classes,\n\t\t\tastIncl.classes...)\n\n\t\tast.functions = append(ast.functions,\n\t\t\tastIncl.functions...)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parseForwardDeclares ... Parses dependencies to create a forward-declare string for each.
func (i *Interface) parseForwardDeclares() { for _, dependency := range i.Dependencies { if !parsers.ShouldBeIncludedInHeader(dependency) { i.ForwardDeclaresString += "class " + dependency + ";\n" } } }
[ "func (i *Interface) parseDependencies() {\n\tvar dependencies []string\n\tfor _, function := range i.Functions {\n\n\t\t// \"expanded\" refers to creating a parsers.from a templated type, i.e \"QMap <int, QString>\" becomes [QMap int QString]\n\t\texpandedReturnType := strings.FieldsFunc(function.ReturnType, templatedTypeSeparators) \n\t\tfor _, dataType := range(expandedReturnType) {\n\t\t\tdependencies = append(dependencies, strings.TrimSpace(dataType))\n\t\t}\n\n\t\tfor _, parameter := range function.Parameters {\n\t\t\texpandedParameter := strings.FieldsFunc(parameter.Type, templatedTypeSeparators)\n\t\t\tfor _, innerParameter := range expandedParameter {\n\t\t\t\tdependencies = append(dependencies, strings.TrimSpace(innerParameter))\n\t\t\t} \n\t\t}\n\t}\n\ti.Dependencies = dependencies\n\ti.Dependencies = parsers.RemoveConstSpecifiers(i.Dependencies)\n\ti.Dependencies = parsers.RemovePointersAndReferences(i.Dependencies)\n\ti.Dependencies = parsers.RemoveStdDataTypes(i.Dependencies)\n\ti.Dependencies = parsers.MapDataTypesToLibraryDependencies(i.Dependencies)\n\ti.Dependencies = parsers.RemoveDuplicates(i.Dependencies)\n\tsort.Strings(i.Dependencies)\n}", "func (id ifaceDefiner) Declare() {\n\tfor ix := range id.pkg.Files {\n\t\tfile, pfile := id.pkg.Files[ix], id.pfiles[ix]\n\t\tfor _, pdef := range pfile.Interfaces {\n\t\t\texport, err := validIdent(pdef.Name, reservedNormal)\n\t\t\tif err != nil {\n\t\t\t\tid.env.prefixErrorf(file, pdef.Pos, err, \"interface %s invalid name\", pdef.Name)\n\t\t\t\tcontinue // keep going to catch more errors\n\t\t\t}\n\t\t\tdetail := identDetail(\"interface\", file, pdef.Pos)\n\t\t\tif err := file.DeclareIdent(pdef.Name, detail); err != nil {\n\t\t\t\tid.env.prefixErrorf(file, pdef.Pos, err, \"interface %s name conflict\", pdef.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdef := &Interface{NamePos: NamePos(pdef.NamePos), Exported: export, File: file}\n\t\t\tid.builders[pdef.Name] = &ifaceBuilder{def, pdef}\n\t\t}\n\t}\n}", 
"func (td typeDefiner) Declare() {\n\tfor ix := range td.pkg.Files {\n\t\tfile, pfile := td.pkg.Files[ix], td.pfiles[ix]\n\t\tfor _, pdef := range pfile.TypeDefs {\n\t\t\tdetail := identDetail(\"type\", file, pdef.Pos)\n\t\t\tif err := file.DeclareIdent(pdef.Name, detail); err != nil {\n\t\t\t\ttd.env.prefixErrorf(file, pdef.Pos, err, \"type %s name conflict\", pdef.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttd.builders[pdef.Name] = td.makeTypeDefBuilder(file, pdef)\n\t\t}\n\t}\n}", "func resolveDeclarations(ctx *sql.Context, a *Analyzer, node sql.Node, scope *Scope) (sql.Node, error) {\n\treturn resolveDeclarationsInner(ctx, a, node, newDeclarationScope(nil))\n}", "func (i *Interface) parseIncludes() {\n\tfor _, dependency := range i.Dependencies {\n\t\tinclude := NewInclude(dependency)\n\t\tif parsers.ShouldBeIncludedInHeader(dependency) {\n\t\t\ti.HeaderIncludesString += include.ToString() + \"\\n\"\n\t\t} else {\n\t\t\ti.ImplementationIncludesString += include.ToString() + \"\\n\"\n\t\t}\n\t}\n}", "func (c *C) declareRefRels() {\n\tr := map[semantic.Type]refRel{}\n\tc.refRels = r\n\n\tsli := refRel{}\n\tsli.declare(c, \"slice\", c.T.Sli)\n\tr[slicePrototype] = sli\n\n\tstr := refRel{}\n\tstr.declare(c, \"string\", c.T.StrPtr)\n\tr[semantic.StringType] = str\n\n\tvar isRefTy func(ty semantic.Type) bool\n\tisRefTy = func(ty semantic.Type) bool {\n\t\tty = semantic.Underlying(ty)\n\t\tif ty == semantic.StringType {\n\t\t\treturn true\n\t\t}\n\t\tswitch ty := ty.(type) {\n\t\tcase *semantic.Slice, *semantic.Reference, *semantic.Map:\n\t\t\treturn true\n\t\tcase *semantic.Class:\n\t\t\tfor _, f := range ty.Fields {\n\t\t\t\tif isRefTy(f.Type) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// Forward declare all the reference types.\n\tfor apiTy, cgTy := range c.T.target {\n\t\tapiTy = semantic.Underlying(apiTy)\n\t\tswitch apiTy {\n\t\tcase semantic.StringType:\n\t\t\t// Already implemented\n\n\t\tdefault:\n\t\t\tswitch apiTy := 
apiTy.(type) {\n\t\t\tcase *semantic.Slice:\n\t\t\t\tr[apiTy] = sli\n\n\t\t\tdefault:\n\t\t\t\tif isRefTy(apiTy) {\n\t\t\t\t\tfuncs := refRel{}\n\t\t\t\t\tfuncs.declare(c, apiTy.Name(), cgTy)\n\t\t\t\t\tr[apiTy] = funcs\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *BasePhpParserListener) EnterDeclareList(ctx *DeclareListContext) {}", "func (p *gc_parser) parse_import_decl() {\n\tp.expect_keyword(\"import\")\n\talias := p.expect(scanner.Ident)\n\tpath := p.parse_package()\n\tfullName := \"!\" + path.Name + \"!\" + alias\n\tp.path_to_name[path.Name] = fullName\n\tp.pfc.add_package_to_scope(fullName, path.Name)\n}", "func (w *bodyBase) declared(name string) bool {\n\tfor _, s := range w.list {\n\t\tif decl, ok := s.(*Declare); ok && decl.name == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func dumpDecls(p *csym.Parser, outputDir string) error {\n\t// Create output file.\n\tdeclsPath := filepath.Join(outputDir, declsName)\n\tfmt.Println(\"creating:\", declsPath)\n\tf, err := os.Create(declsPath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"unable to create declarations header %q\", declsPath)\n\t}\n\tdefer f.Close()\n\t// Store declarations of default binary.\n\tif err := dumpOverlay(f, p.Overlay); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\t// Store declarations of overlays.\n\tfor _, overlay := range p.Overlays {\n\t\toverlayName := fmt.Sprintf(overlayNameFormat, overlay.ID)\n\t\toverlayPath := filepath.Join(outputDir, overlayName)\n\t\tfmt.Println(\"creating:\", overlayPath)\n\t\tf, err := os.Create(overlayPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"unable to create overlay header %q\", overlayPath)\n\t\t}\n\t\tdefer f.Close()\n\t\tif err := dumpOverlay(f, overlay); err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\treturn nil\n}", "func ParseForwardPorts(h IptablesHandler, nat string, chain string) ([]int, error) {\n\trules, err := h.IptablesListRules(nat, chain)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treason := \"\"\n\tports := make([]int, 0)\n\tfor _, rule := range rules {\n\t\tflags := pflag.NewFlagSet(\"iptables-flag\", pflag.ContinueOnError)\n\t\tflags.ParseErrorsWhitelist.UnknownFlags = true\n\t\tforwardPort := flags.Int(\"dport\", 0, \"\")\n\t\terr := flags.Parse(strings.Split(rule, \" \"))\n\t\tif err != nil {\n\t\t\treason = fmt.Sprintf(\"%s; %s\", reason, err.Error())\n\t\t} else if *forwardPort != 0 {\n\t\t\tports = append(ports, *forwardPort)\n\t\t}\n\t}\n\n\treturn ports, nil\n}", "func convertDependencies(deps []string) []*license_metadata_proto.AnnotatedDependency {\n\tvar ret []*license_metadata_proto.AnnotatedDependency\n\n\tfor _, d := range deps {\n\t\tcomponents := strings.Split(d, \":\")\n\t\tdep := components[0]\n\t\tcomponents = components[1:]\n\t\tad := &license_metadata_proto.AnnotatedDependency{\n\t\t\tFile: proto.String(dep),\n\t\t\tAnnotations: make([]string, 0, len(components)),\n\t\t}\n\t\tfor _, ann := range components {\n\t\t\tif len(ann) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tad.Annotations = append(ad.Annotations, ann)\n\t\t}\n\t\tret = append(ret, ad)\n\t}\n\n\treturn ret\n}", "func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) {\n\tindex, name := g.createIndexAndNameDecl(run, typeName, \"\")\n\tg.Printf(\"const %s\\n\", name)\n\tg.Printf(\"var %s\\n\", index)\n}", "func Predeclared() starlark.StringDict {\n\treturn starlark.StringDict{\n\t\t\"base64\": NewBase64Module(),\n\t\t\"uuid\": NewUUIDModule(),\n\t\t\"http\": NewHTTPModule(),\n\t\t\"struct\": starlark.NewBuiltin(\"struct\", StructFn),\n\t}\n}", "func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) {\n\tvar indexes, names []string\n\tfor i, run := range runs {\n\t\tindex, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf(\"_%d\", i))\n\t\tif len(run) != 1 {\n\t\t\tindexes = append(indexes, index)\n\t\t}\n\t\tnames = append(names, name)\n\t}\n\tg.Printf(\"const (\\n\")\n\tfor _, name := range names 
{\n\t\tg.Printf(\"\\t%s\\n\", name)\n\t}\n\tg.Printf(\")\\n\\n\")\n\n\tif len(indexes) > 0 {\n\t\tg.Printf(\"var (\")\n\t\tfor _, index := range indexes {\n\t\t\tg.Printf(\"\\t%s\\n\", index)\n\t\t}\n\t\tg.Printf(\")\\n\\n\")\n\t}\n}", "func (p *Parser) ParseDecls(syms []*sym.Symbol) {\n\tfor i := 0; i < len(syms); i++ {\n\t\ts := syms[i]\n\t\tswitch body := s.Body.(type) {\n\t\tcase *sym.Name1:\n\t\t\tp.parseSymbol(s.Hdr.Value, body.Name)\n\t\tcase *sym.Name2:\n\t\t\tp.parseSymbol(s.Hdr.Value, body.Name)\n\t\tcase *sym.SetSLD2:\n\t\t\tn := p.parseLineNumbers(s.Hdr.Value, body, syms[i+1:])\n\t\t\ti += n\n\t\tcase *sym.FuncStart:\n\t\t\tn := p.parseFunc(s.Hdr.Value, body, syms[i+1:])\n\t\t\ti += n\n\t\tcase *sym.Def:\n\t\t\tswitch body.Class {\n\t\t\tcase sym.ClassEXT, sym.ClassSTAT:\n\t\t\t\tt := p.parseType(body.Type, nil, \"\")\n\t\t\t\tp.parseGlobalDecl(s.Hdr.Value, body.Size, body.Class, t, body.Name)\n\t\t\tcase sym.ClassMOS, sym.ClassSTRTAG, sym.ClassMOU, sym.ClassUNTAG, sym.ClassTPDEF, sym.ClassENTAG, sym.ClassMOE, sym.ClassFIELD:\n\t\t\t\t// nothing to do.\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"support for symbol class %q not yet implemented\", body.Class))\n\t\t\t}\n\t\tcase *sym.Def2:\n\t\t\tswitch body.Class {\n\t\t\tcase sym.ClassEXT, sym.ClassSTAT:\n\t\t\t\tt := p.parseType(body.Type, body.Dims, body.Tag)\n\t\t\t\tp.parseGlobalDecl(s.Hdr.Value, body.Size, body.Class, t, body.Name)\n\t\t\tcase sym.ClassMOS, sym.ClassMOU, sym.ClassTPDEF, sym.ClassMOE, sym.ClassFIELD, sym.ClassEOS:\n\t\t\t\t// nothing to do.\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"support for symbol class %q not yet implemented\", body.Class))\n\t\t\t}\n\t\tcase *sym.Overlay:\n\t\t\tp.parseOverlay(s.Hdr.Value, body)\n\t\tcase *sym.SetOverlay:\n\t\t\toverlay, ok := p.overlayIDs[s.Hdr.Value]\n\t\t\tif !ok {\n\t\t\t\tpanic(fmt.Errorf(\"unable to locate overlay with ID %x\", s.Hdr.Value))\n\t\t\t}\n\t\t\tp.curOverlay = overlay\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"support for 
symbol type %T not yet implemented\", body))\n\t\t}\n\t}\n}", "func ParseDeps(fileName string) []string {\n\tfset := token.NewFileSet()\n\n\tf, err := parser.ParseFile(fset, fileName, nil, parser.ImportsOnly)\n\tCheck(err)\n\n\tdepsArray := make([]string, len(f.Imports))\n\tfor index, s := range f.Imports {\n\t\tdepName := strings.Replace(s.Path.Value, string('\"'), \" \", 2)\n\t\tdepName = strings.Replace(depName, \" \", \"\", 10)\n\t\tdepsArray[index] = depName\n\t}\n\n\treturn depsArray\n}", "func (s *BasePlSqlParserListener) EnterSeq_of_declare_specs(ctx *Seq_of_declare_specsContext) {}", "func buildDependencies(fdSet *dpb.FileDescriptorSet) {\n\t// Dependency to google/api/annotations.proto for gRPC-HTTP transcoding. Here a couple of problems arise:\n\t// 1. Problem: \tWe cannot call descriptor.ForMessage(&annotations.E_Http), which would be our\n\t//\t\t\t\trequired dependency. However, we can call descriptor.ForMessage(&http) and\n\t//\t\t\t\tthen construct the extension manually.\n\t// 2. Problem: \tThe name is set wrong.\n\t// 3. Problem: \tgoogle/api/annotations.proto has a dependency to google/protobuf/descriptor.proto.\n\thttp := annotations.Http{}\n\tfd, _ := descriptor.MessageDescriptorProto(&http)\n\n\textensionName := \"http\"\n\tn := \"google/api/annotations.proto\"\n\tl := dpb.FieldDescriptorProto_LABEL_OPTIONAL\n\tt := dpb.FieldDescriptorProto_TYPE_MESSAGE\n\ttName := \"google.api.HttpRule\"\n\textendee := \".google.protobuf.MethodOptions\"\n\n\thttpExtension := &dpb.FieldDescriptorProto{\n\t\tName: &extensionName,\n\t\tNumber: &annotations.E_Http.Field,\n\t\tLabel: &l,\n\t\tType: &t,\n\t\tTypeName: &tName,\n\t\tExtendee: &extendee,\n\t}\n\n\tfd.Extension = append(fd.Extension, httpExtension) // 1. Problem\n\tfd.Name = &n // 2. 
Problem\n\tfd.Dependency = append(fd.Dependency, \"google/protobuf/descriptor.proto\") //3.rd Problem\n\n\t// Build other required dependencies\n\te := empty.Empty{}\n\tfdp := dpb.DescriptorProto{}\n\tfd2, _ := descriptor.MessageDescriptorProto(&e)\n\tfd3, _ := descriptor.MessageDescriptorProto(&fdp)\n\tdependencies := []*dpb.FileDescriptorProto{fd, fd2, fd3}\n\n\t// According to the documentation of protoReflect.CreateFileDescriptorFromSet the file I want to print\n\t// needs to be at the end of the array. All other FileDescriptorProto are dependencies.\n\tfdSet.File = append(dependencies, fdSet.File...)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
isPureVirtualDefinition ... Returns whether a function is pure virtual.
func isPureVirtualDefinition(line string) bool { line = strings.Replace(line, " ", "", -1) return (strings.Contains(line, "virtual") && strings.Contains(line, "=0;")) }
[ "func (f *Field) Virtual() bool {\n\treturn f.hasOpt(\"virtual\")\n}", "func (w column) IsVirtual() bool {\n\treturn w.desc.Virtual\n}", "func (tp Type) IsVirtualTable() bool {\n\treturn tp == VirtualTable\n}", "func (c Cursor) CXXMethod_IsVirtual() bool {\n\to := C.clang_CXXMethod_isVirtual(c.c)\n\tif o != 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (b *IBFCell) IsPure() bool {\n\treturn (b.Count == 1 || b.Count == -1) && b.HashSum.Uint64() == checkSumHash(b.IDSum.Uint64())\n}", "func (c Cursor) IsVirtualBase() bool {\n\to := C.clang_isVirtualBase(c.c)\n\treturn o == C.uint(1)\n}", "func MethodToPureFunc(self, dst interface{}, methodName string) bool {\r\n\tm, ok := reflect.TypeOf(self).MethodByName(methodName)\r\n\treturn ok && castFunc(m.Func, dst)\r\n}", "func (d *portworx) IsPureFileVolume(volume *torpedovolume.Volume) (bool, error) {\n\tvar proxySpec *api.ProxySpec\n\tvar err error\n\tif proxySpec, err = d.getProxySpecForAVolume(volume); err != nil {\n\t\treturn false, err\n\t}\n\tif proxySpec == nil {\n\t\treturn false, nil\n\t}\n\n\tif proxySpec.ProxyProtocol == api.ProxyProtocol_PROXY_PROTOCOL_PURE_FILE {\n\t\tlog.Debugf(\"Volume [%s] is Pure File volume\", volume.ID)\n\t\treturn true, nil\n\t}\n\n\tlog.Debugf(\"Volume [%s] is not Pure File volume\", volume.ID)\n\treturn false, nil\n}", "func (s *Session) isPureExpr(expr ast.Expr) bool {\n\tif expr == nil {\n\t\treturn true\n\t}\n\n\tswitch expr := expr.(type) {\n\tcase *ast.Ident:\n\t\treturn true\n\tcase *ast.BasicLit:\n\t\treturn true\n\tcase *ast.BinaryExpr:\n\t\treturn s.isPureExpr(expr.X) && s.isPureExpr(expr.Y)\n\tcase *ast.CallExpr:\n\t\ttv := s.TypeInfo.Types[expr.Fun]\n\t\tfor _, arg := range expr.Args {\n\t\t\tif s.isPureExpr(arg) == false {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\tif tv.IsType() {\n\t\t\treturn true\n\t\t}\n\n\t\tif tv.IsBuiltin() {\n\t\t\tif ident, ok := expr.Fun.(*ast.Ident); ok {\n\t\t\t\tif pureBuiltinFuncNames[ident.Name] {\n\t\t\t\t\treturn 
true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\tcase *ast.CompositeLit:\n\t\treturn true\n\tcase *ast.FuncLit:\n\t\treturn true\n\tcase *ast.IndexExpr:\n\t\treturn s.isPureExpr(expr.X) && s.isPureExpr(expr.Index)\n\tcase *ast.SelectorExpr:\n\t\treturn s.isPureExpr(expr.X)\n\tcase *ast.SliceExpr:\n\t\treturn s.isPureExpr(expr.Low) && s.isPureExpr(expr.High) && s.isPureExpr(expr.Max)\n\tcase *ast.StarExpr:\n\t\treturn s.isPureExpr(expr.X)\n\tcase *ast.TypeAssertExpr:\n\t\treturn true\n\tcase *ast.UnaryExpr:\n\t\treturn s.isPureExpr(expr.X)\n\tcase *ast.ParenExpr:\n\t\treturn s.isPureExpr(expr.X)\n\n\tcase *ast.InterfaceType:\n\t\treturn true\n\tcase *ast.ArrayType:\n\t\treturn true\n\tcase *ast.ChanType:\n\t\treturn true\n\tcase *ast.KeyValueExpr:\n\t\treturn true\n\tcase *ast.MapType:\n\t\treturn true\n\tcase *ast.StructType:\n\t\treturn true\n\tcase *ast.FuncType:\n\t\treturn true\n\n\tcase *ast.Ellipsis:\n\t\treturn true\n\n\tcase *ast.BadExpr:\n\t\treturn false\n\t}\n\n\treturn false\n}", "func (o *StorageR0DriveAllOf) HasVirtualDrivePolicy() bool {\n\tif o != nil && o.VirtualDrivePolicy.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (d *portworx) IsPureVolume(volume *torpedovolume.Volume) (bool, error) {\n\tvar proxySpec *api.ProxySpec\n\tvar err error\n\tif proxySpec, err = d.getProxySpecForAVolume(volume); err != nil {\n\t\treturn false, err\n\t}\n\n\tif proxySpec == nil {\n\t\treturn false, nil\n\t}\n\n\tif proxySpec.ProxyProtocol == api.ProxyProtocol_PROXY_PROTOCOL_PURE_BLOCK || proxySpec.ProxyProtocol == api.ProxyProtocol_PROXY_PROTOCOL_PURE_FILE {\n\t\tlog.Debugf(\"Volume [%s] is Pure volume\", volume.ID)\n\t\treturn true, nil\n\t}\n\n\tlog.Debugf(\"Volume [%s] is not Pure Block volume\", volume.ID)\n\treturn false, nil\n}", "func (r DynamicRole) IsVirtualRole() bool {\n\tswitch r {\n\tcase DynamicRoleVirtualExecutor:\n\t\treturn true\n\tcase DynamicRoleVirtualValidator:\n\t\treturn true\n\t}\n\treturn false\n}", "func 
IsVirtualNode(node *corev1.Node) bool {\n\tnodeType, found := node.Labels[liqoconst.TypeLabel]\n\treturn found && nodeType == liqoconst.TypeNode\n}", "func (f FooBarProps) IsProps() {}", "func (t FuncType) IsFuncType() bool { return true }", "func (m *Method) IsStrict() bool {\n\treturn m.MaybeStrict == nil || *m.MaybeStrict\n}", "func (ExprType) HasMethod(fn string) bool { return boolResult }", "func (o *HyperflexHealthCheckDefinition) HasScriptExecutionMode() bool {\n\tif o != nil && o.ScriptExecutionMode != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func Xsqlite3NotPureFunc(tls *libc.TLS, pCtx uintptr) int32 {\n\tbp := tls.Alloc(16)\n\tdefer tls.Free(16)\n\n\tvar pOp uintptr\n\tif (*Sqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe == uintptr(0) {\n\t\treturn 1\n\t}\n\tpOp = (*Vdbe)(unsafe.Pointer((*Sqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe)).FaOp + uintptr((*Sqlite3_context)(unsafe.Pointer(pCtx)).FiOp)*24\n\tif int32((*VdbeOp)(unsafe.Pointer(pOp)).Fopcode) == OP_PureFunc {\n\t\tvar zContext uintptr\n\t\tvar zMsg uintptr\n\t\tif int32((*VdbeOp)(unsafe.Pointer(pOp)).Fp5)&NC_IsCheck != 0 {\n\t\t\tzContext = ts + 5130\n\t\t} else if int32((*VdbeOp)(unsafe.Pointer(pOp)).Fp5)&NC_GenCol != 0 {\n\t\t\tzContext = ts + 5149\n\t\t} else {\n\t\t\tzContext = ts + 5168\n\t\t}\n\t\tzMsg = Xsqlite3_mprintf(tls, ts+5177,\n\t\t\tlibc.VaList(bp, (*FuncDef)(unsafe.Pointer((*Sqlite3_context)(unsafe.Pointer(pCtx)).FpFunc)).FzName, zContext))\n\t\tXsqlite3_result_error(tls, pCtx, zMsg, -1)\n\t\tXsqlite3_free(tls, zMsg)\n\t\treturn 0\n\t}\n\treturn 1\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
templatedTypeSeparators ... Used to expand templated types such as QMap>
func templatedTypeSeparators (r rune) bool { return r == '<' || r == '>' || r == ',' }
[ "func TemplateDelim(str string, data interface{}, begin, end string) (string, error) {\n\ttmpl, err := template.New(\"test\").Funcs(fmap).Delims(begin, end).Parse(str)\n\tif err == nil {\n\t\tvar doc bytes.Buffer\n\t\terr = tmpl.Execute(&doc, data)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn strings.Replace(doc.String(), \"<no value>\", \"\", -1), nil\n\t}\n\treturn \"\", err\n}", "func mapKeyType(v interface{}) string {\n\tstr := toString(v)\n\tkey, value, found := stringsCut(str, \",\")\n\tif !found || !strings.HasPrefix(key, \"map<\") || !strings.HasSuffix(value, \">\") {\n\t\tpanic(fmt.Errorf(\"mapKeyValue: expected map<Type1,Type2>, got %v\", str))\n\t}\n\treturn strings.TrimPrefix(key, \"map<\")\n}", "func (tp *Template) Delims(left, right string) *Template {\n\ttp.leftDelim = left\n\ttp.rightDelim = right\n\treturn tp\n}", "func (c *Config) delims(level string) (string, string) {\n\n\tif level == \"deploy\" {\n\t\tif config.DeployDelimiter != \"\" {\n\t\t\tdelims := strings.Split(config.GenerateDelimiter, \":\")\n\t\t\treturn delims[0], delims[1]\n\t\t}\n\t\t// default\n\t\treturn \"<<\", \">>\"\n\t}\n\n\tif config.GenerateDelimiter != \"\" {\n\t\tdelims := strings.Split(config.GenerateDelimiter, \":\")\n\t\treturn delims[0], delims[1]\n\t}\n\t// default\n\treturn \"{{\", \"}}\"\n}", "func flattenInspectTemplateInspectConfigInfoTypes(c *Client, i interface{}, res *InspectTemplate) *InspectTemplateInspectConfigInfoTypes {\n\tm, ok := i.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tr := &InspectTemplateInspectConfigInfoTypes{}\n\n\tif dcl.IsEmptyValueIndirect(i) {\n\t\treturn EmptyInspectTemplateInspectConfigInfoTypes\n\t}\n\tr.Name = dcl.FlattenString(m[\"name\"])\n\n\treturn r\n}", "func rustType(s string) template.HTML {\n\tswitch s {\n\tcase \"string\":\n\t\treturn \"String\"\n\tcase \"int\":\n\t\treturn \"i64\"\n\tcase \"int8\":\n\t\treturn \"i8\"\n\tcase \"int16\":\n\t\treturn \"i16\"\n\tcase \"int32\":\n\t\treturn 
\"i32\"\n\tcase \"int64\":\n\t\treturn \"i64\"\n\tcase \"uint\":\n\t\treturn \"u64\"\n\tcase \"uint8\":\n\t\treturn \"u8\"\n\tcase \"uint16\":\n\t\treturn \"u16\"\n\tcase \"uint32\":\n\t\treturn \"u32\"\n\tcase \"uint64\":\n\t\treturn \"u64\"\n\tcase \"float32\":\n\t\treturn \"f32\"\n\tcase \"float64\":\n\t\treturn \"f64\"\n\tcase \"interface{}\":\n\t\treturn \"Value\"\n\tdefault:\n\t\tif strings.HasPrefix(s, \"map[\") {\n\t\t\tkeyType := rustType(s[4 : 4+strings.Index(s[4:], \"]\")])\n\t\t\tvalueType := rustType(s[5+len(keyType):])\n\t\t\t//return template.HTML(\"Map<String, Value>\")\n\t\t\treturn template.HTML(fmt.Sprintf(\"std::collections::HashMap<%s, %s>\", keyType, valueType))\n\t\t} else if strings.HasPrefix(s, \"*\") {\n\t\t\t// Remove indirection\n\t\t\treturn rustType(s[1:])\n\t\t}\n\t\treturn template.HTML(s)\n\t}\n}", "func printTypedNames(w io.Writer, prefix string, ns []TypedEntry) {\n\tif len(ns) == 0 {\n\t\treturn\n\t}\n\ttprev := typeString(ns[0].Types)\n\tsep := prefix\n\tfor _, n := range ns {\n\t\ttcur := typeString(n.Types)\n\t\tif tcur != tprev {\n\t\t\tif tprev == \"\" {\n\t\t\t\t// Should be impossible.\n\t\t\t\tpanic(n.Location.String() + \": untyped declarations in the middle of a typed list\")\n\t\t\t}\n\t\t\tfmt.Fprintf(w, \" - %s\", tprev)\n\t\t\ttprev = tcur\n\t\t\tsep = prefix\n\t\t\tif sep == \"\" {\n\t\t\t\tsep = \" \"\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(w, \"%s%s\", sep, n.Str)\n\t\tsep = \" \"\n\t}\n\tif tprev != \"\" {\n\t\tfmt.Fprintf(w, \" - %s\", tprev)\n\t}\n}", "func mapValueType(v interface{}) string {\n\tstr := toString(v)\n\tkey, value, found := stringsCut(str, \",\")\n\tif !found || !strings.HasPrefix(key, \"map<\") || !strings.HasSuffix(value, \">\") {\n\t\tpanic(fmt.Errorf(\"mapKeyValue: expected map<Type1,Type2>, got %v\", str))\n\t}\n\treturn strings.TrimSuffix(value, \">\")\n}", "func isGenericDelim(r rune) bool {\n\treturn isSpace(r) || isLeftDelim(r) || isRightDelim(r)\n}", "func (t PathType) Separator() string 
{\n\tswitch t {\n\tcase Relative:\n\t\treturn \".\"\n\tcase Absolute:\n\t\treturn \"/\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}", "func (p *Params) TypeList(local string, resolver Resolver) string {\n\tbuf := bytes.Buffer{}\n\n\tfor i := 0; i < p.Len(); i++ {\n\t\tpd := p.data[i]\n\t\tif buf.Len() > 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tbuf.WriteString(pd.Type.ShortName(local, resolver))\n\t}\n\tif p.variadic {\n\t\tif buf.Len() > 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tvp := p.Variadic()\n\t\tut := Type{typ: vp.Type.typ.(*types.Slice).Elem()}\n\t\tbuf.WriteString(\"...\")\n\t\tbuf.WriteString(ut.ShortName(local, resolver))\n\t}\n\treturn buf.String()\n}", "func (gen *jsGenerator) formatType(t *idl.Type) string {\n\tvar s string\n\tms, ok := jsTypes[t.Name]\n\tif !ok {\n\t\tms = t.Name\n\t}\n\tif t.Name == \"list\" {\n\t\ts = fmt.Sprintf(ms, gen.formatType(t.ValueType))\n\t} else if t.Name == \"map\" {\n\t\ts = fmt.Sprintf(ms, jsTypes[t.KeyType.Name], gen.formatType(t.ValueType))\n\t} else if t.IsPrimitive() && t.Name != \"string\" {\n\t\ts = ms + \"?\"\n\t} else if t.IsEnum(gen.tplRootIdl) {\n\t\ts = ms + \"?\"\n\t} else {\n\t\ts = ms\n\t}\n\treturn s\n}", "func QualifiedTypeName(t DataType) string {\n\tswitch t.Kind() {\n\tcase ArrayKind:\n\t\ta := t.(*Array)\n\t\treturn fmt.Sprintf(\"%s<%s>\",\n\t\t\tt.Name(),\n\t\t\tQualifiedTypeName(a.ElemType.Type),\n\t\t)\n\tcase MapKind:\n\t\th := t.(*Map)\n\t\treturn fmt.Sprintf(\"%s<%s, %s>\",\n\t\t\tt.Name(),\n\t\t\tQualifiedTypeName(h.KeyType.Type),\n\t\t\tQualifiedTypeName(h.ElemType.Type),\n\t\t)\n\t}\n\treturn t.Name()\n}", "func (t *Template) WithDelims(left, right string) *Template {\n\tt.leftDelim = left\n\tt.rightDelim = right\n\treturn t\n}", "func expandInspectTemplateInspectConfigInfoTypes(c *Client, f *InspectTemplateInspectConfigInfoTypes, res *InspectTemplate) (map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\tm := make(map[string]interface{})\n\tif v := f.Name; 
!dcl.IsEmptyValueIndirect(v) {\n\t\tm[\"name\"] = v\n\t}\n\n\treturn m, nil\n}", "func typeNames(vars []*types.Var) string {\n\tif len(vars) == 0 {\n\t\treturn \"\"\n\t}\n\tvar buf strings.Builder\n\tfor i, v := range vars {\n\t\tif i != 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tbuf.WriteString(nameOf(v.Type()))\n\t}\n\treturn buf.String()\n}", "func FormatTypeParams(tparams *typeparams.TypeParamList) string {\n\tif tparams == nil || tparams.Len() == 0 {\n\t\treturn \"\"\n\t}\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('[')\n\tfor i := 0; i < tparams.Len(); i++ {\n\t\tif i > 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tbuf.WriteString(tparams.At(i).Obj().Name())\n\t}\n\tbuf.WriteByte(']')\n\treturn buf.String()\n}", "func flattenInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(c *Client, i interface{}, res *InspectTemplate) *InspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath {\n\tm, ok := i.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tr := &InspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath{}\n\n\tif dcl.IsEmptyValueIndirect(i) {\n\t\treturn EmptyInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath\n\t}\n\tr.Path = dcl.FlattenString(m[\"path\"])\n\n\treturn r\n}", "func typeinfo(list ...interface{}) string {\n\tvar buf strings.Builder\n\tfor i, item := range list {\n\t\tif i > 0 {\n\t\t\tbuf.WriteString(\",\")\n\t\t}\n\t\tbuf.WriteString(fmt.Sprintf(\"%d:%T:%v\", i, item, item))\n\t}\n\treturn buf.String()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
isEtcdConfigFile returns whether the given path looks like a configuration file, and in that case it returns the corresponding hash to detect modifications.
func isEtcdConfigFile(path string) (bool, fhash) { if info, err := os.Stat(path); err != nil || info.IsDir() { return false, fhash{} } b, err := os.ReadFile(path) if err != nil { return false, fhash{} } // search for the "endpoints:" string if strings.Contains(string(b), "endpoints:") { return true, sha256.Sum256(b) } return false, fhash{} }
[ "func TestConfigFile(t *testing.T) {\n\tsystemdTests := []sString{\n\t\t{\n\t\t\tinput: \"nanobox-server\",\n\t\t\texpected: \"/etc/systemd/system/nanobox-server.service\",\n\t\t},\n\t\t{\n\t\t\tinput: \"nanobox\",\n\t\t\texpected: \"/etc/systemd/system/nanobox.service\",\n\t\t},\n\t}\n\n\tupstartTests := []sString{\n\t\t{\n\t\t\tinput: \"nanobox-server\",\n\t\t\texpected: \"/etc/init/nanobox-server.conf\",\n\t\t},\n\t\t{\n\t\t\tinput: \"nanobox\",\n\t\t\texpected: \"/etc/init/nanobox.conf\",\n\t\t},\n\t}\n\n\tinitSystem = \"systemd\"\n\tfor i := range systemdTests {\n\t\tout := configFile(systemdTests[i].input)\n\t\tif out != systemdTests[i].expected {\n\t\t\tt.Errorf(\"Failed to return correct systemd config path. Got '%s'\", out)\n\t\t}\n\t}\n\n\tinitSystem = \"upstart\"\n\tfor i := range upstartTests {\n\t\tout := configFile(upstartTests[i].input)\n\t\tif out != upstartTests[i].expected {\n\t\t\tt.Errorf(\"Failed to return correct upstart config path. Got '%s'\", out)\n\t\t}\n\t}\n\n\tinitSystem = \"fake\"\n\tif out := configFile(\"nanobox-server\"); out != \"\" {\n\t\tt.Errorf(\"Failed to return empty string. 
Got '%s'\", out)\n\t}\n}", "func (p *Patch) ConfigChanged(remotePath string) bool {\n\tfor _, patchPart := range p.Patches {\n\t\tif patchPart.ModuleName == \"\" {\n\t\t\tfor _, summary := range patchPart.PatchSet.Summary {\n\t\t\t\tif summary.Name == remotePath {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}", "func (config Config) HasLoadedConfigurationFileBeenModified() bool {\n\tif fileInfo, err := os.Stat(config.filePath); err == nil {\n\t\tif !fileInfo.ModTime().IsZero() {\n\t\t\treturn config.lastFileModTime.Unix() != fileInfo.ModTime().Unix()\n\t\t}\n\t}\n\treturn false\n}", "func isConfigState(e *yang.Entry) bool {\n\treturn e.IsDir() && (e.Name == \"config\" || e.Name == \"state\")\n}", "func kubeconfigExistsAndIsFile(filePath string) (bool, error) {\n\tinfo, err := os.Stat(filePath)\n\t// if we couldn't open the file, error out\n\tif err != nil {\n\t\treturn false, err\n\t}\n\t// if we have a directory instead of a file, error out\n\tif info.IsDir() {\n\t\treturn false, errors.New(\"The provided path was a directory. 
Expected a file.\")\n\t}\n\treturn true, err\n}", "func (e VCSEntry) GetConfigFile() string {\n\tif e.Path == \"\" {\n\t\treturn constants.DefaultConfigYaml\n\t}\n\treturn e.Path\n}", "func (h *manager) checkNeedReload(configFile string) (bool, string, *types.HealthCheckConfig) {\n\thash, err := hashFile(configFile)\n\tif err != nil {\n\t\tklog.Errorf(\"failed hash config file: %v\", err)\n\t\treturn false, \"\", nil\n\t}\n\tif hash == h.configHash {\n\t\treturn false, \"\", nil\n\t}\n\tconfig, err := h.configUpdateFunc(configFile)\n\tif err != nil {\n\t\tklog.Fatalf(\"failed init health check config: %v\", err)\n\t}\n\tif len(config.RuleNodes) != 0 {\n\t\tfound := false\n\t\tfor _, no := range config.RuleNodes {\n\t\t\tif no == util.NodeIP() {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false, \"\", nil\n\t\t}\n\t}\n\n\treturn true, hash, config\n}", "func checkAndCreateConfigFile(ctx context.Context, serviceUUID string, configDirPath string, cfg *common.ConfigID, dbIns db.DB, requuid string) error {\n\t// check and create the config file if necessary\n\tfpath := filepath.Join(configDirPath, cfg.FileName)\n\texist, err := utils.IsFileExist(fpath)\n\tif err != nil {\n\t\tglog.Errorln(\"check the config file error\", err, fpath, \"requuid\", requuid)\n\t\treturn err\n\t}\n\n\tif exist {\n\t\t// the config file exists, check whether it is the same with the config in DB.\n\t\tfdata, err := ioutil.ReadFile(fpath)\n\t\tif err != nil {\n\t\t\tglog.Errorln(\"read the config file error\", err, fpath, \"requuid\", requuid)\n\t\t\treturn err\n\t\t}\n\n\t\tfmd5 := md5.Sum(fdata)\n\t\tfmd5str := hex.EncodeToString(fmd5[:])\n\t\tif fmd5str == cfg.FileMD5 {\n\t\t\tglog.Infoln(\"the config file has the latest content\", fpath, \"requuid\", requuid)\n\t\t\treturn nil\n\t\t}\n\t\t// the config content is changed, update the config file\n\t\tglog.Infoln(\"the config file changed, update it\", fpath, \"requuid\", requuid)\n\t}\n\n\t// the 
config file not exists or content is changed\n\t// read the content\n\tcfgFile, err := dbIns.GetConfigFile(ctx, serviceUUID, cfg.FileID)\n\tif err != nil {\n\t\tglog.Errorln(\"GetConfigFile error\", err, fpath, \"requuid\", requuid)\n\t\treturn err\n\t}\n\n\tdata := []byte(cfgFile.Spec.Content)\n\terr = utils.CreateOrOverwriteFile(fpath, data, os.FileMode(cfgFile.Spec.FileMode))\n\tif err != nil {\n\t\tglog.Errorln(\"write the config file error\", err, fpath, \"requuid\", requuid)\n\t\treturn err\n\t}\n\n\tglog.Infoln(\"write the config file done\", fpath, \"requuid\", requuid)\n\treturn nil\n}", "func (b *backend) pathConfigExists(ctx context.Context, req *logical.Request, _ *framework.FieldData) (bool, error) {\n\tentry, err := req.Storage.Get(ctx, \"config\")\n\tif err != nil {\n\t\treturn false, errwrap.Wrapf(\"failed to get configuration from storage: {{err}}\", err)\n\t}\n\tif entry == nil || len(entry.Value) == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func EtcdPath() string {\n\treturn \"/phonebook/\"\n}", "func (persistence *Persistence) HasConfig(name string) bool {\n\tfile := path.Join(persistence.configDir, name+\".json\")\n\tif info, err := os.Stat(file); err == nil && !info.IsDir() {\n\t\treturn true\n\t}\n\treturn false\n}", "func (flogs *fileLogs) Equal(config dvid.StoreConfig) bool {\n\tpath, _, err := parseConfig(config)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn path == flogs.path\n}", "func readConfigFile(path string) string {\n\tdata, err := ioutil.ReadFile(path)\n\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(constants.ConfigFileNotFound)\n\t\treturn \"\"\n\t}\n\n\treturn string(data)\n}", "func TestConfigRepository_GetConfigFromPath(t *testing.T) {\n\ttmpFile, err := ioutil.TempFile(os.TempDir(), \"test-config-GetConfigFromPath-\")\n\tif assert.NoError(t, err) {\n\t\tdefer os.Remove(tmpFile.Name())\n\t\t_, _ = tmpFile.WriteString(\"{}\")\n\n\t\trepository := NewConfigRepository()\n\t\t_, err := 
repository.GetConfigFromPath(\"\", tmpFile.Name())\n\t\tassert.NoError(t, err)\n\t}\n}", "func configExistsInPath(path string) bool {\n\t// Needed for testing\n\tif config != nil {\n\t\treturn true\n\t}\n\n\t// Check devspace.yaml\n\t_, err := os.Stat(filepath.Join(path, constants.DefaultConfigPath))\n\tif err == nil {\n\t\treturn true\n\t}\n\n\t// Check devspace-configs.yaml\n\t_, err = os.Stat(filepath.Join(path, constants.DefaultConfigsPath))\n\tif err == nil {\n\t\treturn true\n\t}\n\n\treturn false // Normal config file found\n}", "func isMcConfigExists() bool {\n\tconfigFile, err := getMcConfigPath()\n\tif err != nil {\n\t\treturn false\n\t}\n\t_, err = os.Stat(configFile)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}", "func (a *App) SaveEtcdConfig(cfg *config.Etcd) error {\n\treturn saveEtcdConfig(cfg)\n}", "func TestConfigIsJSON(t *testing.T) {\n\thomedir := os.Getenv(\"HOME\")\n\tconfigpath := (homedir + \"/.stalker.json\")\n\tfile, _ := os.Open(configpath)\n\n\tdecoder := json.NewDecoder(file)\n\tconfiguration := Configuration{}\n\terr := decoder.Decode(&configuration)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t} else {\n\t\tt.Log(\"config is valid JSON\")\n\t}\n}", "func configExistsInPath(path string) bool {\n\t// Check devspace.yaml\n\t_, err := os.Stat(filepath.Join(path, constants.DefaultConfigPath))\n\tif err == nil {\n\t\treturn true\n\t}\n\n\t// Check devspace-configs.yaml\n\t_, err = os.Stat(filepath.Join(path, constants.DefaultConfigsPath))\n\tif err == nil {\n\t\treturn true\n\t}\n\n\treturn false // Normal config file found\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
getDirection from byte. Panics on unknown direction.
func getDirection(d byte) direction { switch d { case 'R': return right case 'L': return left case 'U': return up case 'D': return down default: panic(fmt.Sprintf("unknown direction %v", d)) } }
[ "func convertDirectionCode(gpxDirCode string) uint8 {\n\n\tbrytonDirCode := DirectionCodeGoAhead\n\n\tswitch gpxDirCode {\n\tcase \"tshl\":\n\t\tbrytonDirCode = DirectionCodeCloseLeft\n\tcase \"left\":\n\t\tbrytonDirCode = DirectionCodeLeft\n\tcase \"tsll\":\n\t\tbrytonDirCode = DirectionCodeSlightLeft\n\tcase \"straight\":\n\t\tbrytonDirCode = DirectionCodeGoAhead\n\tcase \"tslr\":\n\t\tbrytonDirCode = DirectionCodeSlightRight\n\tcase \"right\":\n\t\tbrytonDirCode = DirectionCodeRight\n\tcase \"tshr\":\n\t\tbrytonDirCode = DirectionCodeCloseRight\n\tdefault:\n\t\tfmt.Println(\"Unsupported direction code: \" + gpxDirCode + \"! Using GoAhead!\")\n\t}\n\n\treturn brytonDirCode\n}", "func GetDirection(pinNo int) uint32 {\n\tindex := (pinNo) / 32\n\n\tregVal := readRegistry(index)\n\n\tgpio := uint32(pinNo % 32)\n\n\tval := ((regVal >> gpio) & 0x1)\n\n\treturn val\n\n}", "func (p *Port) Direction() int {\n\treturn p.direction\n}", "func (e *Elevator) GetDirection() int {\n\treturn e.direction\n}", "func (v *MenuButton) GetDirection() ArrowType {\n\tc := C.gtk_menu_button_get_direction(v.native())\n\treturn ArrowType(c)\n}", "func (road Road) GetDirection() Direction {\n\treturn road.direction\n}", "func (o PacketMirroringFilterOutput) Direction() PacketMirroringFilterDirectionPtrOutput {\n\treturn o.ApplyT(func(v PacketMirroringFilter) *PacketMirroringFilterDirection { return v.Direction }).(PacketMirroringFilterDirectionPtrOutput)\n}", "func (s *ClampDirectionOffset) Direction() dprec.Vec3 {\n\treturn s.direction\n}", "func (o PacketMirroringFilterResponseOutput) Direction() pulumi.StringOutput {\n\treturn o.ApplyT(func(v PacketMirroringFilterResponse) string { return v.Direction }).(pulumi.StringOutput)\n}", "func (r *Range) Direction() Direction {\n\tif r.Start.Equal(r.End) {\n\t\treturn Equal\n\t}\n\n\tif r.Start.Before(r.End) {\n\t\treturn Asc\n\t} else {\n\t\treturn Desc\n\t}\n}", "func (c Car) Direction(p image.Point) Direction {\n\tmeanMov := 
c.MeanMovement()\n\n\t// if entrance is vertical i.e. car moves LEFT<->RIGHT only consider trajectory along X axis\n\tif strings.EqualFold(entrance, \"l\") || strings.EqualFold(entrance, \"r\") {\n\t\tif p.X-int(meanMov) > 0 {\n\t\t\treturn RIGHT\n\t\t}\n\n\t\tif p.X-int(meanMov) < 0 {\n\t\t\treturn LEFT\n\t\t}\n\t}\n\n\t// if entrance is horizontal i.e. car moves TOP<->BOTTOM only consider trajectory along Y axis\n\tif strings.EqualFold(entrance, \"b\") || strings.EqualFold(entrance, \"t\") {\n\t\tif p.Y-int(meanMov) > 0 {\n\t\t\treturn DOWN\n\t\t}\n\n\t\tif p.Y-int(meanMov) < 0 {\n\t\t\treturn UP\n\t\t}\n\t}\n\n\treturn STILL\n}", "func (node *hostNode) GetPeerDirection(id peer.ID) network.Direction {\n\tconns := node.host.Network().ConnsToPeer(id)\n\n\tif len(conns) != 1 {\n\t\treturn network.DirUnknown\n\t}\n\treturn conns[0].Stat().Direction\n}", "func ProtoToComputeFirewallDirectionEnum(e computepb.ComputeFirewallDirectionEnum) *compute.FirewallDirectionEnum {\n\tif e == 0 {\n\t\treturn nil\n\t}\n\tif n, ok := computepb.ComputeFirewallDirectionEnum_name[int32(e)]; ok {\n\t\te := compute.FirewallDirectionEnum(n[len(\"ComputeFirewallDirectionEnum\"):])\n\t\treturn &e\n\t}\n\treturn nil\n}", "func DirectionFromString(d string) Direction {\n\tswitch d {\n\tcase \"L\":\n\t\treturn DirectionLeft\n\tcase \"R\":\n\t\treturn DirectionRight\n\tdefault:\n\t\treturn DirectionUnknown\n\t}\n}", "func FindDirection(alias string) (dir Direction, found bool) {\n\tdir, found = dirMap[strings.ToLower(alias)]\n\treturn dir, found\n}", "func (_this *IDBCursor) Direction() IDBCursorDirection {\n\tvar ret IDBCursorDirection\n\tvalue := _this.Value_JS.Get(\"direction\")\n\tret = IDBCursorDirectionFromJS(value)\n\treturn ret\n}", "func (r *Rule) direction(key item, l *lexer) error {\n\tif key.typ != itemDirection {\n\t\tpanic(\"item is not a direction\")\n\t}\n\tswitch key.value {\n\tcase \"->\":\n\t\tr.Bidirectional = false\n\tcase \"<>\":\n\t\tr.Bidirectional = 
true\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid direction operator %q\", key.value)\n\t}\n\treturn nil\n}", "func ProtoToComputeAlphaPacketMirroringFilterDirectionEnum(e alphapb.ComputeAlphaPacketMirroringFilterDirectionEnum) *alpha.PacketMirroringFilterDirectionEnum {\n\tif e == 0 {\n\t\treturn nil\n\t}\n\tif n, ok := alphapb.ComputeAlphaPacketMirroringFilterDirectionEnum_name[int32(e)]; ok {\n\t\te := alpha.PacketMirroringFilterDirectionEnum(n[len(\"ComputeAlphaPacketMirroringFilterDirectionEnum\"):])\n\t\treturn &e\n\t}\n\treturn nil\n}", "func direction(player int) int {\n\tif player == 1 {\n\t\treturn 1\n\t}\n\treturn -1\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
addSegment to the wire.
func (w *wire) addSegment(dir direction, dist int) { var lastPoint point if len(w.points) != 0 { lastPoint = w.points[len(w.points)-1] } w.points = append(w.points, lastPoint.move(dir, dist)) }
[ "func (t *tcpAssembler) AddSegment(s *tcpSegment) {\n\tif t.fin == 0 || t.fin == -1 {\n\t\treturn\n\t}\n\tfor i, b := range s.Data {\n\t\toffset := uint32(i) + s.Start - t.sequence\n\t\tif offset >= uint32(len(t.buffer)) {\n\t\t\tcontinue\n\t\t}\n\t\tt.buffer[offset] = b\n\t\tt.mask[offset] = true\n\t}\n\tif s.Fin {\n\t\tfinOffset := s.Start + uint32(len(s.Data)) - t.sequence\n\t\tif finOffset <= uint32(len(t.buffer)) {\n\t\t\tt.fin = int(finOffset)\n\t\t}\n\t}\n}", "func (a *AST) AddSegment(seg *Segment) {\n\t_ = seg.SetParent(a)\n\ta.segs = append(a.segs, seg)\n}", "func (wire *Wire) AddWireSegment(dir byte, magnitude int) error {\n\tvar newSegment segment\n\tif wire.wireSegments == nil {\n\t\tnewSegment.start = Point{0, 0}\n\t\tnewSegment.end = Point{0, 0}\n\t} else {\n\t\tnewSegment = segment{wire.wireSegments[len(wire.wireSegments)-1].end, wire.wireSegments[len(wire.wireSegments)-1].end}\n\t}\n\n\tswitch dir {\n\tcase 'U':\n\t\tnewSegment.end.Y += magnitude\n\tcase 'D':\n\t\tnewSegment.end.Y -= magnitude\n\tcase 'R':\n\t\tnewSegment.end.X += magnitude\n\tcase 'L':\n\t\tnewSegment.end.X -= magnitude\n\tdefault:\n\t\treturn fmt.Errorf(\"Invalid direction given: %v\", dir)\n\t}\n\n\twire.wireSegments = append(wire.wireSegments, newSegment)\n\treturn nil\n}", "func (m *Manifest) AddSegment(s *Segment) {\n\tm.addSegment(s, true)\n\tm.addedSegments[s.ID] = struct{}{}\n}", "func (s *SegmentService) Add(memberID int, item *Segment) (*Response, error) {\n\n\tdata := struct {\n\t\tSegment `json:\"segment\"`\n\t}{*item}\n\n\treq, err := s.client.newRequest(\"POST\", fmt.Sprintf(\"segment/%d\", memberID), data)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult := &Response{}\n\tresp, err := s.client.do(req, result)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\titem.ID, _ = result.Obj.ID.Int64()\n\treturn result, nil\n}", "func (s *SegmentChangesWrapper) AddToSegment(segmentName string, keys []string) error {\n\treturn 
errSegmentStorageNotImplementedMethod\n}", "func (cfw *CoverageDataWriter) AppendSegment(args map[string]string, visitor CounterVisitor) error {\n\tcfw.stab = &stringtab.Writer{}\n\tcfw.stab.InitWriter()\n\tcfw.stab.Lookup(\"\")\n\n\tvar err error\n\tfor k, v := range args {\n\t\tcfw.stab.Lookup(k)\n\t\tcfw.stab.Lookup(v)\n\t}\n\n\tws := &slicewriter.WriteSeeker{}\n\tif err = cfw.writeSegmentPreamble(args, ws); err != nil {\n\t\treturn err\n\t}\n\tif err = cfw.writeCounters(visitor, ws); err != nil {\n\t\treturn err\n\t}\n\tif err = cfw.patchSegmentHeader(ws); err != nil {\n\t\treturn err\n\t}\n\tif err := cfw.writeBytes(ws.BytesWritten()); err != nil {\n\t\treturn err\n\t}\n\tif err = cfw.writeFooter(); err != nil {\n\t\treturn err\n\t}\n\tif err := cfw.w.Flush(); err != nil {\n\t\treturn fmt.Errorf(\"write error: %v\", err)\n\t}\n\tcfw.stab = nil\n\treturn nil\n}", "func (r Registrar) RegisterSegment(ctx context.Context, meta seg.Meta, remote net.Addr) error {\n\tconn, err := r.Dialer.Dial(ctx, remote)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tclient := cppb.NewSegmentRegistrationServiceClient(conn)\n\t_, err = client.SegmentsRegistration(ctx,\n\t\t&cppb.SegmentsRegistrationRequest{\n\t\t\tSegments: map[int32]*cppb.SegmentsRegistrationRequest_Segments{\n\t\t\t\tint32(meta.Type): {\n\t\t\t\t\tSegments: []*cppb.PathSegment{\n\t\t\t\t\t\tseg.PathSegmentToPB(meta.Segment),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tgrpc.RetryProfile...,\n\t)\n\treturn err\n}", "func (b *Blaster) AddHitSegment(rate float64, maxHits int) {\n\tb.addHitSegment <- HitSegment{Rate: rate, MaxHits: maxHits}\n}", "func (r SplitStorageAdapter) RegisterSegment(name string) error {\n\terr := r.client.SAdd(r.segmentsRegisteredNamespace(), name).Err()\n\tif err != nil {\n\t\tlog.Debug.Println(\"Error saving segment\", name, err)\n\t}\n\treturn err\n}", "func WithSegment(name string, c echo.Context, f func() error) error {\n\ttx := GetTX(c)\n\tif tx == nil {\n\t\treturn 
f()\n\t}\n\tsegment := newrelic.StartSegment(tx, name)\n\tdefer segment.End()\n\treturn f()\n}", "func (s *segment) Append(b []byte) error {\n\tif s.gReader != nil {\n\t\treturn ErrImmutableSegment\n\t}\n\t_, err := s.appender.Write(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.appender.Sync()\n}", "func AddSegmentHook(hookPoint boil.HookPoint, segmentHook SegmentHook) {\n\tswitch hookPoint {\n\tcase boil.BeforeInsertHook:\n\t\tsegmentBeforeInsertHooks = append(segmentBeforeInsertHooks, segmentHook)\n\tcase boil.BeforeUpdateHook:\n\t\tsegmentBeforeUpdateHooks = append(segmentBeforeUpdateHooks, segmentHook)\n\tcase boil.BeforeDeleteHook:\n\t\tsegmentBeforeDeleteHooks = append(segmentBeforeDeleteHooks, segmentHook)\n\tcase boil.BeforeUpsertHook:\n\t\tsegmentBeforeUpsertHooks = append(segmentBeforeUpsertHooks, segmentHook)\n\tcase boil.AfterInsertHook:\n\t\tsegmentAfterInsertHooks = append(segmentAfterInsertHooks, segmentHook)\n\tcase boil.AfterSelectHook:\n\t\tsegmentAfterSelectHooks = append(segmentAfterSelectHooks, segmentHook)\n\tcase boil.AfterUpdateHook:\n\t\tsegmentAfterUpdateHooks = append(segmentAfterUpdateHooks, segmentHook)\n\tcase boil.AfterDeleteHook:\n\t\tsegmentAfterDeleteHooks = append(segmentAfterDeleteHooks, segmentHook)\n\tcase boil.AfterUpsertHook:\n\t\tsegmentAfterUpsertHooks = append(segmentAfterUpsertHooks, segmentHook)\n\t}\n}", "func (l *Log) newSegment(off uint64) error {\n\ts, err := newSegment(l.Dir, off, l.Config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.segments = append(l.segments, s)\n\tl.activeSegment = s\n\treturn nil\n}", "func EncodeSegment(w io.Writer, seg Segment) error {\n\t// Everything else needs the 0xff, marker and potential payload\n\t_, err := w.Write([]byte{0xff, byte(seg.Marker)})\n\tif err != nil || seg.Data == nil {\n\t\treturn err\n\t}\n\t// Payload size includes it's own 2-bytes\n\terr = binary.Write(w, binary.BigEndian, uint16(len(seg.Data))+2)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = 
w.Write(seg.Data)\n\treturn err\n}", "func (t *Type1) writeSegment(w io.Writer, segment int) error {\n\tl := len(t.Segments[segment])\n\tvar asciiBinary byte\n\tif segment == 1 {\n\t\tasciiBinary = 2\n\t} else {\n\t\tasciiBinary = 1\n\t}\n\tprefix := []byte{128, asciiBinary, byte(l & 0xFF), byte(l >> 8 & 0xFF), byte(l >> 16 & 0xFF), byte(l >> 24 & 0xFF)}\n\t_, err := w.Write(prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(t.Segments[segment])\n\treturn err\n}", "func (c *SegmentsClient) Create(segment *Segment) error {\n\treturn c.client.makeRequest(http.MethodPost, \"/contactdb/segments\", segment, &segment)\n}", "func addSegment(curr *segment, key string) (seg *segment) {\n\tif curr.parameter.segment != nil {\n\t\tseg = curr.parameter.segment\n\n\t} else if child, ok := curr.children[key]; !ok { // child does not match...\n\t\tvar isParam bool\n\n\t\tseg, isParam = newSegment(key)\n\n\t\tif isParam {\n\t\t\tcurr.parameter.segment = seg\n\t\t\tcurr.parameter.name = key[2:]\n\n\t\t} else {\n\t\t\tcurr.children[key] = seg\n\t\t}\n\n\t\treturn\n\n\t} else { // child matches...\n\t\tseg = child\n\t}\n\n\treturn\n}", "func newSegment(ctx context.Context, traceID, name string, req *http.Request, c net.Conn) *Segment {\n\tvar (\n\t\tspanID = middleware.ContextSpanID(ctx)\n\t\tparentID = middleware.ContextParentSpanID(ctx)\n\t)\n\n\ts := NewSegment(name, traceID, spanID, c)\n\ts.RecordRequest(req, \"\")\n\n\tif parentID != \"\" {\n\t\ts.ParentID = parentID\n\t\ts.Type = \"subsegment\"\n\t}\n\n\treturn s\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
interceptPoints returns every point where the wire collides with wire o. The points' wireLen is the total wire length to get to that point (both wire combined).
func (w *wire) interceptPoints(o wire) []point { var interceptPoints []point for i := 1; i < len(w.points); i++ { v1 := segment{ from: w.points[i-1], to: w.points[i], } for u := 1; u < len(o.points); u++ { v2 := segment{ from: o.points[u-1], to: o.points[u], } intercept := v1.intercepts(v2) if intercept.x != 0 && intercept.y != 0 { // Calculate total wire length (both wires combined) intercept.wireLen = v1.from.wireLen + intercept.distanceToPoint(v1.from) + v2.from.wireLen + intercept.distanceToPoint(v2.from) interceptPoints = append(interceptPoints, intercept) } } } return interceptPoints }
[ "func (l line2) SlopeIntercept() (float64, float64) {\n\tslope := (l.end.y - l.start.y) / (l.end.x - l.start.x)\n\tintercept := l.start.y - slope*l.start.x\n\treturn slope, intercept\n}", "func (l *Line) GetIntersectionPoints(other Shape) []IntersectionPoint {\n\n\tintersections := []IntersectionPoint{}\n\n\tswitch b := other.(type) {\n\n\tcase *Line:\n\n\t\tdet := (l.X2-l.X)*(b.Y2-b.Y) - (b.X2-b.X)*(l.Y2-l.Y)\n\n\t\tif det != 0 {\n\n\t\t\t// MAGIC MATH; the extra + 1 here makes it so that corner cases (literally aiming the line through the corners of the\n\t\t\t// hollow square in world5) works!\n\n\t\t\tlambda := (((l.Y - b.Y) * (b.X2 - b.X)) - ((l.X - b.X) * (b.Y2 - b.Y)) + 1) / det\n\n\t\t\tgamma := (((l.Y - b.Y) * (l.X2 - l.X)) - ((l.X - b.X) * (l.Y2 - l.Y)) + 1) / det\n\n\t\t\tif (0 < lambda && lambda < 1) && (0 < gamma && gamma < 1) {\n\t\t\t\tdx, dy := l.GetDelta()\n\t\t\t\tintersections = append(intersections, IntersectionPoint{l.X + lambda*dx, l.Y + lambda*dy, other})\n\t\t\t}\n\n\t\t}\n\tcase *Rectangle:\n\t\tside := NewLine(b.X, b.Y, b.X, b.Y+b.H)\n\t\tintersections = append(intersections, l.GetIntersectionPoints(side)...)\n\n\t\tside.Y = b.Y + b.H\n\t\tside.X2 = b.X + b.W\n\t\tside.Y2 = b.Y + b.H\n\t\tintersections = append(intersections, l.GetIntersectionPoints(side)...)\n\n\t\tside.X = b.X + b.W\n\t\tside.Y2 = b.Y\n\t\tintersections = append(intersections, l.GetIntersectionPoints(side)...)\n\n\t\tside.Y = b.Y\n\t\tside.X2 = b.X\n\t\tside.Y2 = b.Y\n\t\tintersections = append(intersections, l.GetIntersectionPoints(side)...)\n\tcase *Space:\n\t\tfor _, shape := range *b {\n\t\t\tintersections = append(intersections, l.GetIntersectionPoints(shape)...)\n\t\t}\n\tcase *Circle:\n\t\t// \tTO-DO: Add this later, because this is kinda hard and would necessitate some complex vector math that, for whatever\n\t\t// reason, is not even readily available in a Golang library as far as I can tell???\n\t\tbreak\n\t}\n\n\t// fmt.Println(\"WARNING! 
Object \", other, \" isn't a valid shape for collision testing against Line \", l, \"!\")\n\n\tsort.Slice(intersections, func(i, j int) bool {\n\t\treturn Distance(l.X, l.Y, intersections[i].X, intersections[i].Y) < Distance(l.X, l.Y, intersections[j].X, intersections[j].Y)\n\t})\n\n\treturn intersections\n\n}", "func getSlopeIntercept(p1 Point, p2 Point) (slope float64, intercept float64) {\n\tslope = (float64(p2.Y) - float64(p1.Y)) / (float64(p2.X) - float64(p1.X))\n\tintercept = float64(p1.Y) - slope*float64(p1.X)\n\n\treturn slope, intercept\n}", "func (v segment) intercepts(o segment) point {\n\t// With the assumption that no interceptions occur when segments are\n\t// parallel, and that segments always move either horizontally or\n\t// vertically (not both), we can pretty easily check for interceptions.\n\t//\n\t// First find the values where interception could occur, and what axis for\n\t// both segments are changing. I.e. if the segments are horizontal\n\t// or vertical.\n\ta, axAxis := v.unchangingAxis()\n\tb, bxAxis := o.unchangingAxis()\n\tif axAxis == bxAxis {\n\t\t// We're assuming that they can't overlap\n\t\t// when they are parallel\n\t\treturn point{}\n\t}\n\n\t// Check if the first value (x or y) is on the interval of the\n\t// same axis of the other segment. 
Do this for the other value (axis) too.\n\tvar aCanCollide bool\n\tif axAxis {\n\t\taCanCollide = inRange(a, o.from.x, o.to.x)\n\t} else {\n\t\taCanCollide = inRange(a, o.from.y, o.to.y)\n\t}\n\tvar bCanCollide bool\n\tif bxAxis {\n\t\tbCanCollide = inRange(b, v.from.x, v.to.x)\n\t} else {\n\t\tbCanCollide = inRange(b, v.from.y, v.to.y)\n\t}\n\n\t// If both axes are in range then they collide\n\tif aCanCollide && bCanCollide {\n\t\t// Check if a is an x- or y-value\n\t\tif axAxis {\n\t\t\treturn point{x: a, y: b}\n\t\t}\n\t\treturn point{x: b, y: a}\n\t}\n\treturn point{x: 0, y: 0}\n}", "func (r Rect) IntersectionPoints(l Line) []Vec {\n\t// Use map keys to ensure unique points\n\tpointMap := make(map[Vec]struct{})\n\n\tfor _, edge := range r.Edges() {\n\t\tif intersect, ok := l.Intersect(edge); ok {\n\t\t\tpointMap[intersect] = struct{}{}\n\t\t}\n\t}\n\n\tpoints := make([]Vec, 0, len(pointMap))\n\tfor point := range pointMap {\n\t\tpoints = append(points, point)\n\t}\n\n\t// Order the points\n\tif len(points) == 2 {\n\t\tif points[1].To(l.A).Len() < points[0].To(l.A).Len() {\n\t\t\treturn []Vec{points[1], points[0]}\n\t\t}\n\t}\n\n\treturn points\n}", "func getPointsFromVertex(x1 int, x2 int, y1 int, y2 int) []point {\n\tvar num float64\n\tvar slope int\n\tvar x, y int\n\tif y2 == y1 {\n\t\tnum = math.Abs(float64(x2 - x1))\n\t} else if x2 == x1 {\n\t\tnum = math.Abs(float64(y2 - y1))\n\t} else {\n\t\tnum = math.Min(math.Abs(float64(y2-y1)), math.Abs(float64(x2-x1)))\n\t}\n\tquantity := int(num)\n\tpoints := make([]point, quantity+1)\n\tydiff := y2 - y1\n\txdiff := x2 - x1\n\tif ydiff == 0 || xdiff == 0 {\n\t\tslope = 0\n\t} else if math.Abs(float64(ydiff)) > math.Abs(float64(xdiff)) {\n\t\tslope = ydiff / xdiff\n\t} else {\n\t\tslope = xdiff / ydiff\n\t}\n\ti := 0\n\tfor i < quantity {\n\t\tif math.Abs(float64(xdiff)) < math.Abs(float64(ydiff)) {\n\t\t\tif slope == 0 {\n\t\t\t\tx = 0\n\t\t\t} else {\n\t\t\t\tx = xdiff / quantity * i\n\t\t\t}\n\t\t\tif slope == 0 
{\n\t\t\t\ty = ydiff / quantity * i\n\t\t\t} else {\n\t\t\t\tif xdiff < 0 {\n\t\t\t\t\ty = -slope * i\n\t\t\t\t} else {\n\t\t\t\t\ty = slope * i\n\t\t\t\t}\n\t\t\t}\n\t\t\t//fmt.Printf(\"quantity is %d, slope is %d, y is %d ydiff is %d\\n\", quantity, slope, y, ydiff)\n\t\t\tpoints[i] = point{x + x1, y + y1}\n\t\t\ti++\n\t\t} else {\n\t\t\tif slope == 0 {\n\t\t\t\ty = 0\n\t\t\t} else {\n\t\t\t\ty = ydiff / quantity * i\n\t\t\t}\n\t\t\tif slope == 0 {\n\t\t\t\tx = xdiff / quantity * i\n\t\t\t} else {\n\t\t\t\tif ydiff < 0 {\n\t\t\t\t\tx = -slope * i\n\t\t\t\t} else {\n\t\t\t\t\tx = slope * i\n\t\t\t\t}\n\t\t\t}\n\t\t\t//fmt.Printf(\"quantity is %d, slope is %d, x is %d xdiff is %d\\n\", quantity, slope, x, xdiff)\n\t\t\tpoints[i] = point{x + x1, y + y1}\n\t\t\ti++\n\t\t}\n\n\t}\n\tpoints[quantity].x = x2\n\tpoints[quantity].y = y2\n\t//fmt.Printf(\"%v\\n\", points)\n\treturn points\n}", "func lineLow(x0, y0, x1, y1 int) []image.Point {\n\tdeltaX := x1 - x0\n\tdeltaY := y1 - y0\n\n\tstepY := 1\n\tif deltaY < 0 {\n\t\tstepY = -1\n\t\tdeltaY = -deltaY\n\t}\n\n\tvar res []image.Point\n\tdiff := 2*deltaY - deltaX\n\ty := y0\n\tfor x := x0; x <= x1; x++ {\n\t\tres = append(res, image.Point{x, y})\n\t\tif diff > 0 {\n\t\t\ty += stepY\n\t\t\tdiff -= 2 * deltaX\n\t\t}\n\t\tdiff += 2 * deltaY\n\t}\n\treturn res\n}", "func (r Ruler) LineSliceAlong(start float64, stop float64, l Line) Line {\n\tvar sum float64\n\tvar slice []Point\n\n\tfor i := 0; i < len(l)-1; i++ {\n\t\tp0 := l[i]\n\t\tp1 := l[i+1]\n\t\td := r.Distance(p0, p1)\n\n\t\tsum += d\n\n\t\tif sum > start && len(slice) == 0 {\n\t\t\tslice = append(slice, interpolate(p0, p1, (start-(sum-d))/d))\n\t\t}\n\n\t\tif sum >= stop {\n\t\t\tslice = append(slice, interpolate(p0, p1, (stop-(sum-d))/d))\n\t\t\treturn slice\n\t\t}\n\n\t\tif sum > start {\n\t\t\tslice = append(slice, p1)\n\t\t}\n\t}\n\n\treturn slice\n}", "func decorateClns(clns [][]int, l []Point) [][]int {\n\tr := clns\n\tfor i := 0; i < len(clns); i++ 
{\n\t\tpIdx1 := clns[i][0]\n\t\tpIdx2 := clns[i][1]\n\n\t\tvar P1, P11, P12, P13, P2, P21, P22, P23 Point\n\t\tvar dxbase, dyw, dxw, dye, dxe int\n\t\tvar dyw2, dye2 int\n\n\t\tP1 = l[pIdx1]\n\t\tif pIdx1-1 >= 0 {\n\t\t\tP11 = l[pIdx1-1]\n\t\t\tdyw = (P1.y - P11.y)\n\t\t}\n\t\tif pIdx1-2 >= 0 {\n\t\t\tP12 = l[pIdx1-2]\n\t\t\tdxw = P11.x - P12.x\n\t\t}\n\t\tif pIdx1-3 >= 0 {\n\t\t\tP13 = l[pIdx1-3]\n\t\t\tdyw2 = (P12.y - P13.y)\n\t\t}\n\n\t\tP2 = l[pIdx2]\n\t\tif pIdx2+1 < len(l) {\n\t\t\tP21 = l[pIdx2+1]\n\t\t\tdye = (P2.y - P21.y)\n\t\t}\n\t\tif pIdx2+2 < len(l) {\n\t\t\tP22 = l[pIdx2+2]\n\t\t\tdxe = P22.x - P21.x\n\t\t}\n\t\tif pIdx2+3 < len(l) {\n\t\t\tP23 = l[pIdx2+3]\n\t\t\tdye2 = (P22.y - P23.y)\n\t\t}\n\n\t\tdxbase = P2.x - P1.x\n\t\t// The outline goes like this:\n\t\t//\n\t\t// PXP\n\t\t// XXX\n\t\t// XXP PXPXPXX\n\t\t// XXX XXXXXXX\n\t\t// XXPXXXXXXXXXXXXPXXXXXX\n\t\t//\n\t\t// Therefore we have to make corrections:\n\t\tif dyw <= 0 && dye <= 0 {\n\t\t\tdxbase++ // convex sourrounding\n\t\t}\n\t\tif dyw > 0 && dye > 0 {\n\t\t\tdxbase-- // concave sourrounding\n\t\t}\n\n\t\t// decr incr for the neighboring x-sections\n\t\t// but dyw inverted - 'cause relative to *this* section\n\t\tif dyw > 0 && dyw2 <= 0 { // dyw2 == 0 meaning end of world => convex surrounding\n\t\t\tdxw++\n\t\t}\n\t\tif dyw < 0 && dyw2 > 0 {\n\t\t\tdxw--\n\t\t}\n\n\t\tif dye > 0 && dye2 <= 0 {\n\t\t\tdxe++\n\t\t}\n\t\tif dye < 0 && dye2 > 0 {\n\t\t\tdxe--\n\t\t}\n\n\t\t// pf(\"%v %v %v . 
%v %v %v \\n\", P1, P11, P12, P2, P21, P22)\n\n\t\tr[i] = append(r[i], dxbase,\n\t\t\tdyw, dxw, dyw2,\n\t\t\tdye, dxe, dye2)\n\t}\n\treturn r\n}", "func lineThrough(x0, x1 point) func(x float64) float64 {\n\treturn func(x float64) float64 { return (x1[1]-x0[1])/(x1[0]-x0[0])*(x-x0[0]) + x0[1] }\n}", "func polylineCoversPoint(a *s2.Polyline, b s2.Point) bool {\n\treturn a.IntersectsCell(s2.CellFromPoint(b))\n}", "func (p Painter) Polyline(x, y []int) {\n\tif len(x) != len(y) {\n\t\tpanic(\"Coordinate-list length error\")\n\t}\n\n\tfor i := 0; i < len(x)-1; i++ {\n\t\tp.Line(x[i], y[i], x[i+1], y[i+1])\n\t}\n}", "func (c Circle) IntersectionPoints(l Line) []Vec {\n\tcContainsA := c.Contains(l.A)\n\tcContainsB := c.Contains(l.B)\n\n\t// Special case for both endpoint being contained within the circle\n\tif cContainsA && cContainsB {\n\t\treturn []Vec{}\n\t}\n\n\t// Get closest point on the line to this circles' center\n\tclosestToCenter := l.Closest(c.Center)\n\n\t// If the distance to the closest point is greater than the radius, there are no points of intersection\n\tif closestToCenter.To(c.Center).Len() > c.Radius {\n\t\treturn []Vec{}\n\t}\n\n\t// If the distance to the closest point is equal to the radius, the line is tangent and the closest point is the\n\t// point at which it touches the circle.\n\tif closestToCenter.To(c.Center).Len() == c.Radius {\n\t\treturn []Vec{closestToCenter}\n\t}\n\n\t// Special case for endpoint being on the circles' center\n\tif c.Center == l.A || c.Center == l.B {\n\t\totherEnd := l.B\n\t\tif c.Center == l.B {\n\t\t\totherEnd = l.A\n\t\t}\n\t\tintersect := c.Center.Add(c.Center.To(otherEnd).Unit().Scaled(c.Radius))\n\t\treturn []Vec{intersect}\n\t}\n\n\t// This means the distance to the closest point is less than the radius, so there is at least one intersection,\n\t// possibly two.\n\n\t// If one of the end points exists within the circle, there is only one intersection\n\tif cContainsA || cContainsB {\n\t\tcontainedPoint := 
l.A\n\t\totherEnd := l.B\n\t\tif cContainsB {\n\t\t\tcontainedPoint = l.B\n\t\t\totherEnd = l.A\n\t\t}\n\n\t\t// Use trigonometry to get the length of the line between the contained point and the intersection point.\n\t\t// The following is used to describe the triangle formed:\n\t\t// - a is the side between contained point and circle center\n\t\t// - b is the side between the center and the intersection point (radius)\n\t\t// - c is the side between the contained point and the intersection point\n\t\t// The captials of these letters are used as the angles opposite the respective sides.\n\t\t// a and b are known\n\t\ta := containedPoint.To(c.Center).Len()\n\t\tb := c.Radius\n\t\t// B can be calculated by subtracting the angle of b (to the x-axis) from the angle of c (to the x-axis)\n\t\tB := containedPoint.To(c.Center).Angle() - containedPoint.To(otherEnd).Angle()\n\t\t// Using the Sin rule we can get A\n\t\tA := math.Asin((a * math.Sin(B)) / b)\n\t\t// Using the rule that there are 180 degrees (or Pi radians) in a triangle, we can now get C\n\t\tC := math.Pi - A + B\n\t\t// If C is zero, the line segment is in-line with the center-intersect line.\n\t\tvar c float64\n\t\tif C == 0 {\n\t\t\tc = b - a\n\t\t} else {\n\t\t\t// Using the Sine rule again, we can now get c\n\t\t\tc = (a * math.Sin(C)) / math.Sin(A)\n\t\t}\n\t\t// Travelling from the contained point to the other end by length of a will provide the intersection point.\n\t\treturn []Vec{\n\t\t\tcontainedPoint.Add(containedPoint.To(otherEnd).Unit().Scaled(c)),\n\t\t}\n\t}\n\n\t// Otherwise the endpoints exist outside of the circle, and the line segment intersects in two locations.\n\t// The vector formed by going from the closest point to the center of the circle will be perpendicular to the line;\n\t// this forms a right-angled triangle with the intersection points, with the radius as the hypotenuse.\n\t// Calculate the other triangles' sides' length.\n\ta := math.Sqrt(math.Pow(c.Radius, 2) - 
math.Pow(closestToCenter.To(c.Center).Len(), 2))\n\n\t// Travelling in both directions from the closest point by length of a will provide the two intersection points.\n\tfirst := closestToCenter.Add(closestToCenter.To(l.A).Unit().Scaled(a))\n\tsecond := closestToCenter.Add(closestToCenter.To(l.B).Unit().Scaled(a))\n\n\tif first.To(l.A).Len() < second.To(l.A).Len() {\n\t\treturn []Vec{first, second}\n\t}\n\treturn []Vec{second, first}\n}", "func polylineCentroid(p []Point) Point {\n\tvar centroid r3.Vector\n\tfor i := 1; i < len(p); i++ {\n\t\tcentroid = centroid.Add(EdgeTrueCentroid(p[i-1], p[i]).Vector)\n\t}\n\treturn Point{centroid}\n}", "func (o ElemU) Ipoints() (coords [][]float64) {\n\tcoords = la.MatAlloc(len(o.IpsElem), Global.Ndim)\n\tfor idx, ip := range o.IpsElem {\n\t\tcoords[idx] = o.Shp.IpRealCoords(o.X, ip)\n\t}\n\treturn\n}", "func (xye *XYextentSearch) Intersect(ext [4]float64) []int {\n\tx0 := slice.GetIndxFloat64(xye.m[0].Val, ext[0])\n\tx1 := slice.GetIndxFloat64(xye.m[0].Val, ext[1])\n\ty0 := slice.GetIndxFloat64(xye.m[2].Val, ext[2])\n\ty1 := slice.GetIndxFloat64(xye.m[2].Val, ext[3])\n\treturn slice.Intersect(xye.m[0].Indx[x0:x1], xye.m[2].Indx[y0:y1])\n}", "func pointslope(pp *privPath, i, j int) (ctr, dir Point) {\n\t// assume i<j\n\n\tn := len(pp.Pt)\n\tsums := pp.Sums\n\tr := 0 // rotations from i to j\n\n\tfor j >= n {\n\t\tj -= n\n\t\tr++\n\t}\n\tfor i >= n {\n\t\ti -= n\n\t\tr--\n\t}\n\tfor j < 0 {\n\t\tj += n\n\t\tr--\n\t}\n\tfor i < 0 {\n\t\ti += n\n\t\tr++\n\t}\n\n\tx := float64(sums[j+1].x - sums[i].x + r*sums[n].x)\n\ty := float64(sums[j+1].y - sums[i].y + r*sums[n].y)\n\tx2 := float64(sums[j+1].x2 - sums[i].x2 + r*sums[n].x2)\n\txy := float64(sums[j+1].xy - sums[i].xy + r*sums[n].xy)\n\ty2 := float64(sums[j+1].y2 - sums[i].y2 + r*sums[n].y2)\n\tk := float64(j + 1 - i + r*n)\n\n\tctr.X = x / k\n\tctr.Y = y / k\n\n\ta := (x2 - x*x/k) / k\n\tb := (xy - x*y/k) / k\n\tc := (y2 - y*y/k) / k\n\n\tlambda2 := (a + c + 
math.Sqrt((a-c)*(a-c)+4*b*b)) / 2 // larger e.value\n\n\t// now find e.vector for lambda2\n\ta -= lambda2\n\tc -= lambda2\n\n\tvar l float64\n\tif fabs(a) >= fabs(c) {\n\t\tl = math.Sqrt(a*a + b*b)\n\t\tif l != 0 {\n\t\t\tdir.X = -b / l\n\t\t\tdir.Y = a / l\n\t\t}\n\t} else {\n\t\tl = math.Sqrt(c*c + b*b)\n\t\tif l != 0 {\n\t\t\tdir.X = -c / l\n\t\t\tdir.Y = b / l\n\t\t}\n\t}\n\tif l == 0 {\n\t\tdir.X, dir.Y = 0, 0 // sometimes this can happen when k=4: the two eigenvalues coincide\n\t}\n\treturn\n}", "func (s *Server) GetPoints() []Message {\n\ts.cond.L.Lock()\n\tdefer s.cond.L.Unlock()\n\tcpy := make([]Message, len(s.points))\n\tcopy(cpy, s.points)\n\treturn cpy\n}", "func polylineCoversPointWithIdx(a *s2.Polyline, b s2.Point) (bool, int) {\n\tfor edgeIdx := 0; edgeIdx < a.NumEdges(); edgeIdx++ {\n\t\tif edgeCoversPoint(a.Edge(edgeIdx), b) {\n\t\t\treturn true, edgeIdx\n\t\t}\n\t}\n\treturn false, -1\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
move returns a new point that has the same properties as the point, but has moved a certain distance dist in direction dir.
func (p point) move(dir direction, dist int) point { var movedPoint point switch dir { case up: movedPoint = point{x: p.x, y: p.y + dist} case down: movedPoint = point{x: p.x, y: p.y - dist} case right: movedPoint = point{x: p.x + dist, y: p.y} case left: movedPoint = point{x: p.x - dist, y: p.y} } movedPoint.wireLen = p.wireLen + dist return movedPoint }
[ "func movePoint(p *Point2D, dx, dy float64) {\n\tp.Move(dx, dy)\n}", "func (p *Point) Move(dx int, dy int) {\n\tp.X += dx\n\tp.Y += dy\n}", "func (p *Point) Move(dx, dy int) {\n\tp.X += dx\n\tp.Y += dy\n}", "func (p *Point2D) Move(deltaX, deltaY float64) {\n\t// if you want to modify the \"object\" (i.e. the value) you need to pass a pointer\n\t// otherwise you would only get a copy (by-value)\n\n\t// this is actually short-hand for (*p).x and (*p).y. Go does magic dereferencing on struct pointers.\n\tp.x += deltaX\n\tp.y += deltaY\n}", "func (d *droid) move(direction int) int {\n\td.code.PushInput(int64(direction))\n\td.code.Continue()\n\n\tmoveResult := int(d.code.PopOutput())\n\tif moveResult != 0 {\n\t\tif direction == 1 {\n\t\t\td.location.y--\n\t\t} else if direction == 2 {\n\t\t\td.location.y++\n\t\t} else if direction == 3 {\n\t\t\td.location.x--\n\t\t} else {\n\t\t\td.location.x++\n\t\t}\n\t}\n\n\tif moveResult == 2 {\n\t\td.foundTarget = true\n\t\td.oxygenPosition = &point{x: d.location.x, y: d.location.y}\n\t}\n\n\treturn moveResult\n}", "func (m Mock) Move(ctx context.Context, dir Direction) {}", "func (vm *Machine) move(stmt gcode.Block) {\n\tnewX, newY, newZ, _, _, _ := vm.calcPos(stmt)\n\tvm.addPos(Position{vm.State, newX, newY, newZ})\n}", "func (d *droid) moveToPoint(p *point) {\n\tif !(d.location.x == p.x && d.location.y == p.y) {\n\t\tpath := d.calculatePath(d.location, p)\n\t\tfor _, step := range path {\n\t\t\tmoveResult := d.move(step)\n\t\t\tif moveResult == 0 {\n\t\t\t\tlog.Fatal(\"Move to point can't find path\")\n\t\t\t}\n\t\t}\n\t}\n}", "func (c Circle) Moved(delta Vec) Circle {\n\treturn Circle{\n\t\tCenter: c.Center.Add(delta),\n\t\tRadius: c.Radius,\n\t}\n}", "func (l *Line) Move(direction Vector) Vector {\n\tl.q = l.q.Add(direction)\n\tl.p = l.p.Add(direction)\n\n\treturn l.Center()\n}", "func (c *Constraints) move(dx, dy float32) {\n\tc.x.move(dx)\n\tc.y.move(dy)\n}", "func (r Ray3) Moved(dist float64) Vector3 {\n\treturn 
r.Origin.Plus(r.Dir.Scaled(dist))\n}", "func (p *Particle) Move(step float64) {\n\tp.Pos.Add(p.Vel.Mult(step))\n}", "func (cp *ConvexPolygon) Move(x, y float64) {\n\tcp.X += x\n\tcp.Y += y\n}", "func (e *Engine) Move(dir Direction) (success bool) {\n\tsuccess = false\n\n\tdest1 := e.CurrentState.Figure.Add(dir.Point())\n\tvalid, containsBox := e.CheckDestination(dest1)\n\tif !valid {\n\t\treturn\n\t}\n\tvar dest2 Point\n\tif valid && containsBox {\n\t\tdest2 = dest1.Add(dir.Point())\n\t\tvalid, containsSecBox := e.CheckDestination(dest2)\n\t\tif !valid || containsSecBox {\n\t\t\treturn\n\t\t}\n\t}\n\n\tsuccess = true\n\te.appendState2History(e.CurrentState)\n\n\tif containsBox {\n\t\te.moveBox(dest1, dest2)\n\t}\n\te.CurrentState.Figure = dest1\n\treturn\n}", "func (p *player) Move(direction rune) {\n\tmaxX, _, _ := p.r.GetRendererOutputSize()\n\tswitch direction {\n\tcase 'l':\n\t\tp.x -= p.c.stepSize\n\t\tif p.x < 0 {\n\t\t\tp.x = 0\n\t\t}\n\tcase 'r':\n\t\tp.x += p.c.stepSize\n\t\tif p.x+p.w > int32(maxX) {\n\t\t\tp.x = int32(maxX) - p.w\n\t\t}\n\t}\n}", "func (point Point) Walk(direction Direction) Point {\n\tswitch direction {\n\tcase DirectionTop:\n\t\tpoint.Y++\n\tcase DirectionDown:\n\t\tpoint.Y--\n\tcase DirectionRight:\n\t\tpoint.X++\n\tcase DirectionLeft:\n\t\tpoint.X--\n\t}\n\n\treturn point\n}", "func (s *swimmer) move() {\n\ts.xPos += s.direction()\n}", "func (v Vertex) Move(dx, dy int) {\n\tv.x = v.x + dx\n\tv.y = v.y + dy\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
distanceToOrigin returns the Manhattan distance to origin (0, 0).
func (p point) distanceToOrigin() int { return p.distanceToPoint(point{x: 0, y: 0}) }
[ "func (c *Coordinate) ManhattanDistance() int {\n\treturn int(math.Abs(float64(c.X)) + math.Abs(float64(c.Y)))\n}", "func (s *ship) manhattanDistance() int {\n\tabs := func(num int) int {\n\t\tif num < 0 {\n\t\t\treturn -num\n\t\t}\n\t\treturn num\n\t}\n\treturn abs(s.x) + abs(s.y)\n}", "func getManhattanDistance(x Point, y Point) int {\n\treturn abs(y.x-x.x) + abs(y.y-x.y)\n}", "func ManhattanDist(a Location, b Location) int {\n\treturn util.AbsInt(a.Row-b.Row) + util.AbsInt(a.Col-b.Col)\n}", "func manhattanDistance(a, b xy) int {\n\treturn abs(a.x-b.x) + abs(a.y-b.y)\n}", "func CalculateManhattanDistance(directionCountMap map[Direction]int) int {\n\teastWest := math.Abs(float64(directionCountMap[East]) - float64(directionCountMap[West]))\n\tnorthSouth := math.Abs(float64(directionCountMap[North]) - float64(directionCountMap[South]))\n\treturn int(eastWest + northSouth)\n}", "func ManhattanDistance(p1, p2 Point) int {\n\tdx := p1.x - p2.x\n\tif dx < 0 {\n\t\tdx = -dx\n\t}\n\tdy := p1.y - p2.y\n\tif dy < 0 {\n\t\tdy = -dy\n\t}\n\n\treturn dx + dy\n}", "func (md ManhattanDist) CalcDist(a, b *matrix.DenseMatrix) float64 {\n\treturn math.Abs(a.Get(0, 0)-b.Get(0, 0)) + math.Abs(a.Get(0, 1)-b.Get(0, 1))\n}", "func (dla *DLASystem) DeterminePositionDistanceFromOrigin(position [2]int) float64 {\n\n\tradiusSquared := float64(position[0]*position[0] + position[1]*position[1])\n\treturn math.Sqrt(radiusSquared)\n}", "func CalculateDistanceToDestination(models.Position, models.MobilePhone) float64 {\n\n\t// TODO: Calculate Distance between Mobile Device and Destination\n\treturn 0.0\n}", "func (m match) dist() uint32 {\n\treturn uint32(m.distance - minDistance)\n}", "func TestManhattanDist(t *testing.T) {\n\tw := NewWorld(10, 10, 100, 1_000_000, 25_000, 50_000, 50, 500_000, 900_000, 1_1000_000)\n\tcases := []struct {\n\t\tf, g *Firefly\n\t\twant float32\n\t}{\n\t\t{\n\t\t\tNewFirefly(99.5, 99.5, 0, 0, 1000000, w),\n\t\t\tNewFirefly(99.5, 99.5, 0, 0, 1000000, 
w),\n\t\t\t0,\n\t\t},\n\t\t{\n\t\t\tNewFirefly(50, 50, 0, 0, 1000000, w),\n\t\t\tNewFirefly(50, 950, 0, 0, 1000000, w),\n\t\t\t100,\n\t\t},\n\t\t{\n\t\t\tNewFirefly(50, 850, 0, 0, 1000000, w),\n\t\t\tNewFirefly(50, 950, 0, 0, 1000000, w),\n\t\t\t100,\n\t\t},\n\t\t{\n\t\t\tNewFirefly(50, 50, 0, 0, 1000000, w),\n\t\t\tNewFirefly(950, 50, 0, 0, 1000000, w),\n\t\t\t100,\n\t\t},\n\t\t{\n\t\t\tNewFirefly(50, 50, 0, 0, 1000000, w),\n\t\t\tNewFirefly(950, 950, 0, 0, 1000000, w),\n\t\t\t200,\n\t\t},\n\t\t{\n\t\t\tNewFirefly(50, 50, 0, 0, 1000000, w),\n\t\t\tNewFirefly(150, 150, 0, 0, 1000000, w),\n\t\t\t200,\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tgot := ManhattanDist(c.f, c.g)\n\t\tassert.InDelta(t, got, c.want, 1e-6, fmt.Sprintf(\"Failed case %+v, got %+v\", c, got))\n\t}\n}", "func (q *DistanceFeatureQuery) Origin(origin interface{}) *DistanceFeatureQuery {\n\tq.origin = origin\n\treturn q\n}", "func (v Vector3D) ManhattanLength() int {\n\treturn AbsInt(v.x) + AbsInt(v.y) + AbsInt(v.z)\n}", "func manhattan(rating1, rating2 map[string]float64) (distance float64) {\n\tfor k, v := range rating1 {\n\t\tif v2, ok := rating2[k]; ok {\n\t\t\tdistance += math.Abs(v - v2)\n\t\t}\n\t}\n\treturn distance\n}", "func (c card) distance(d card) int {\n\tdist := d.number - c.number\n\tif dist < 0 {\n\t\tdist += 13\n\t}\n\treturn dist\n}", "func (c Creature) dist() int {\n\tx := math.Pow(float64(Hero.X()-c.X()), 2)\n\ty := math.Pow(float64(Hero.Y()-c.Y()), 2)\n\n\treturn int(math.Sqrt(x + y))\n}", "func distance(from, to *api.Coord) float64 {\n\treturn math.Sqrt(math.Pow(math.Abs(float64(from.X-to.X)), 2) + math.Pow(math.Abs(float64(from.Y-to.Y)), 2))\n}", "func (c *GMapsClient) GetDistance(ctx context.Context, origin, destination *model.Location) (*model.Distance, error) {\n\to, err := origin.Get()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetDistance: %s\", err.Error())\n\t}\n\n\td, err := destination.Get()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetDistance: 
%s\", err.Error())\n\t}\n\n\treq := &maps.DistanceMatrixRequest{\n\t\tOrigins: []string{o},\n\t\tDestinations: []string{d},\n\t\tMode: maps.TravelModeWalking,\n\t\tUnits: maps.UnitsImperial,\n\t}\n\n\tresp, err := c.c.DistanceMatrix(ctx, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetDistance: %s\", err.Error())\n\t}\n\n\tif len(resp.Rows) == 0 {\n\t\treturn nil, fmt.Errorf(\"GetDistance: No DistanceMatrixRows\")\n\t}\n\n\tdist := &model.Distance{\n\t\tID: destination.ID,\n\t\tDuration: resp.Rows[0].Elements[0].Duration / time.Minute,\n\t\tLength: resp.Rows[0].Elements[0].Distance.HumanReadable,\n\t}\n\n\treturn dist, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
abs returns the absolute value of i.
func abs(i int) int { if i < 0 { return -i } return i }
[ "func Iabs(a int) int {\n\tif a < 0 {\n\t\treturn -a\n\t}\n\treturn a\n}", "func (bi Int) Abs() Int {\n\tif bi.GreaterThanEqual(Zero()) {\n\t\treturn bi.Copy()\n\t}\n\treturn bi.Neg()\n}", "func IAbs(n int) int {\n\tif n < 0 {\n\t\treturn -n\n\t}\n\tif n == 0 {\n\t\treturn 0\n\t}\n\treturn n\n}", "func absInt(v int) int {\n\tif v < 0 {\n\t\treturn -v\n\t}\n\treturn v\n}", "func IntAbs(a int) int {\n\tif a < 0 {\n\t\treturn -a\n\t}\n\treturn a\n}", "func abs(x int64) int64 {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}", "func Absolute(x int) int {\n\tif x < 0 {\n\t\treturn -x\n\t}\n\treturn x\n}", "func IntAbs(x int) int {\n\tif x > 0 {\n\t\treturn x\n\t}\n\n\treturn -x\n}", "func (i Instruction) ToAbsolute() Instruction {\n\treturn i & 0xdf\n}", "func (m mathUtil) AbsInt(value int) int {\n\tif value < 0 {\n\t\treturn -value\n\t}\n\treturn value\n}", "func Abs(a int) int {\n\treturn neogointernal.Opcode1(\"ABS\", a).(int)\n}", "func Abs(v int) int {\n\tif v > 0 {\n\t\treturn v\n\t}\n\treturn -v\n}", "func abs(n int) int {\n\ty := n >> 31\n\treturn (n ^ y) - y\n}", "func Abs(operand int) int {\n\tif operand < 0 {\n\t\treturn operand * -1\n\t}\n\treturn operand\n}", "func AbsInt(a int) int {\n\tif a < 0 {\n\t\treturn -a\n\t}\n\treturn a\n}", "func Abs(number int) int {\n\tif number > 0 {\n\t\treturn number\n\t}\n\treturn -number\n}", "func AbsInt(v int) int {\n\tif v < 0 {\n\t\treturn -v\n\t}\n\treturn v\n}", "func abs(slice []float64) []float64 {\n\tvar newSlice []float64\n\tfor _, value := range slice {\n\t\tif value < 0.0 {\n\t\t\tvalue = math.Abs(value)\n\t\t}\n\t\tnewSlice = append(newSlice, value)\n\t}\n\treturn newSlice\n}", "func absValueIf(v int64, absolute bool) int64 {\n\tif absolute && v < 0 {\n\t\tv = -v\n\t}\n\treturn v\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
unchangingAxis returns what axis and the value of that axis. It assumes that exactly one axis is changing.
func (v segment) unchangingAxis() (val int, xAxis bool) { if v.from.x == v.to.x { return v.from.x, true } return v.from.y, false }
[ "func (i *InputHandler) GetAxis(negativeKey, positiveKey Key) float32 {\n\tsum := float32(0)\n\tif i.GetKey(negativeKey) {\n\t\tsum -= 1.0\n\t}\n\tif i.GetKey(positiveKey) {\n\t\tsum += 1.0\n\t}\n\treturn sum\n}", "func (js *joystickState) getAxis(joystick Joystick, axis int) float64 {\n\t// Check that the joystick and axis is valid, return 0 by default.\n\tif js.axis[joystick] == nil || axis >= len(js.axis[joystick]) || axis < 0 {\n\t\treturn 0\n\t}\n\treturn float64(js.axis[joystick][axis])\n}", "func (gdt *Basis) GetAxis(axis Int) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := axis.getBase()\n\n\tret := C.go_godot_basis_get_axis(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}", "func (m *WorkbookChartAxes) GetValueAxis()(WorkbookChartAxisable) {\n return m.valueAxis\n}", "func (m *MergeCell) GetEndAxis() string {\n\treturn strings.Split((*m)[0], \":\")[1]\n}", "func (m *MergeCell) GetStartAxis() string {\n\treturn strings.Split((*m)[0], \":\")[0]\n}", "func UnpackAxis(value int64) UnpackAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"axis\"] = value\n\t}\n}", "func (i *InputHandler) GetAxis2D(negHorizontalKey, posHorizontalKey, negVerticalKey, posVerticalKey Key) Vector2 {\n\taxis := Vector2{i.GetAxis(negHorizontalKey, posHorizontalKey), i.GetAxis(negVerticalKey, posVerticalKey)}\n\treturn axis.Normalize()\n}", "func (gdt *Vector3) GetAxis(axis Vector3Axis) Real {\n\targ0 := gdt.getBase()\n\targ1 := axis.getBase()\n\n\tret := C.go_godot_vector3_get_axis(GDNative.api, arg0, arg1)\n\n\treturn Real(ret)\n}", "func (m *WorkbookChartAxes) GetCategoryAxis()(WorkbookChartAxisable) {\n return m.categoryAxis\n}", "func axisMax(val int) (int, int) {\n\tif val < 10 {\n\t\treturn 10, 1\n\t}\n\n\t// If val is less than 100, return val rounded up to the next 10\n\tif val < 100 {\n\t\tx := val % 10\n\t\treturn val + 10 - x, 10\n\t}\n\n\t// If val is less than 500, return val rounded up to the next 50\n\tif val < 500 {\n\t\tx := val % 50\n\t\treturn val + 50 - 
x, 50\n\t}\n\treturn 1000, 100\n}", "func PackAxis(value int64) PackAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"axis\"] = value\n\t}\n}", "func (m *WorkbookChartAxes) GetSeriesAxis()(WorkbookChartAxisable) {\n return m.seriesAxis\n}", "func (b *BoundingBox) LongestAxis() (axis int) {\n\n\tif b.Dim(0) < b.Dim(1) {\n\t\tif b.Dim(1) < b.Dim(2) {\n\t\t\taxis = 2\n\t\t} else {\n\t\t\taxis = 1\n\t\t}\n\t} else {\n\t\tif b.Dim(0) < b.Dim(2) {\n\t\t\taxis = 2\n\t\t} else {\n\t\t\taxis = 0\n\t\t}\n\t}\n\treturn\n}", "func OneHotAxis(value int64) OneHotAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"axis\"] = value\n\t}\n}", "func (w *Window) JoystickAxis(js Joystick, axis int) float64 {\n\treturn w.currJoy.getAxis(js, axis)\n}", "func (t *Dense) RollAxis(axis, start int, safe bool) (retVal *Dense, err error) {\n\tdims := t.Dims()\n\n\tif !(axis >= 0 && axis < dims) {\n\t\terr = errors.Errorf(invalidAxis, axis, dims)\n\t\treturn\n\t}\n\n\tif !(start >= 0 && start <= dims) {\n\t\terr = errors.Wrap(errors.Errorf(invalidAxis, axis, dims), \"Start axis is wrong\")\n\t\treturn\n\t}\n\n\tif axis < start {\n\t\tstart--\n\t}\n\n\tif axis == start {\n\t\tretVal = t\n\t\treturn\n\t}\n\n\taxes := BorrowInts(dims)\n\tdefer ReturnInts(axes)\n\n\tfor i := 0; i < dims; i++ {\n\t\taxes[i] = i\n\t}\n\tcopy(axes[axis:], axes[axis+1:])\n\tcopy(axes[start+1:], axes[start:])\n\taxes[start] = axis\n\n\tif safe {\n\t\treturn t.SafeT(axes...)\n\t}\n\terr = t.T(axes...)\n\tretVal = t\n\treturn\n}", "func (q Quat) Axis() Vec3f {\n\treturn Vec3f{q.X, q.Y, q.Z}\n}", "func getBlockAxisMove(xDt, yDt, zDt TickTime) (move blockAxisMove, dt TickTime) {\n if xDt <= yDt {\n if xDt <= zDt {\n return blockAxisMoveX, xDt\n } else {\n return blockAxisMoveZ, zDt\n }\n } else {\n if yDt <= zDt {\n return blockAxisMoveY, yDt\n } else {\n return blockAxisMoveZ, zDt\n }\n }\n return\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
intercepts returns where the segment intercepts segment o. If there is no interception then (0, 0) will be returned. wirelen is not provided.
func (v segment) intercepts(o segment) point { // With the assumption that no interceptions occur when segments are // parallel, and that segments always move either horizontally or // vertically (not both), we can pretty easily check for interceptions. // // First find the values where interception could occur, and what axis for // both segments are changing. I.e. if the segments are horizontal // or vertical. a, axAxis := v.unchangingAxis() b, bxAxis := o.unchangingAxis() if axAxis == bxAxis { // We're assuming that they can't overlap // when they are parallel return point{} } // Check if the first value (x or y) is on the interval of the // same axis of the other segment. Do this for the other value (axis) too. var aCanCollide bool if axAxis { aCanCollide = inRange(a, o.from.x, o.to.x) } else { aCanCollide = inRange(a, o.from.y, o.to.y) } var bCanCollide bool if bxAxis { bCanCollide = inRange(b, v.from.x, v.to.x) } else { bCanCollide = inRange(b, v.from.y, v.to.y) } // If both axes are in range then they collide if aCanCollide && bCanCollide { // Check if a is an x- or y-value if axAxis { return point{x: a, y: b} } return point{x: b, y: a} } return point{x: 0, y: 0} }
[ "func (w *wire) interceptPoints(o wire) []point {\n\tvar interceptPoints []point\n\tfor i := 1; i < len(w.points); i++ {\n\t\tv1 := segment{\n\t\t\tfrom: w.points[i-1],\n\t\t\tto: w.points[i],\n\t\t}\n\t\tfor u := 1; u < len(o.points); u++ {\n\t\t\tv2 := segment{\n\t\t\t\tfrom: o.points[u-1],\n\t\t\t\tto: o.points[u],\n\t\t\t}\n\t\t\tintercept := v1.intercepts(v2)\n\t\t\tif intercept.x != 0 && intercept.y != 0 {\n\t\t\t\t// Calculate total wire length (both wires combined)\n\t\t\t\tintercept.wireLen = v1.from.wireLen + intercept.distanceToPoint(v1.from) +\n\t\t\t\t\tv2.from.wireLen + intercept.distanceToPoint(v2.from)\n\t\t\t\tinterceptPoints = append(interceptPoints, intercept)\n\t\t\t}\n\t\t}\n\t}\n\treturn interceptPoints\n}", "func (l line2) SlopeIntercept() (float64, float64) {\n\tslope := (l.end.y - l.start.y) / (l.end.x - l.start.x)\n\tintercept := l.start.y - slope*l.start.x\n\treturn slope, intercept\n}", "func (t *Transcript) WhichIntronIntersect(reg Coor) []int {\n\tresult := []int{}\n\tfor i, intron := range t.Introns {\n\t\tif intron.Intersect(reg) {\n\t\t\tresult = append(result, i)\n\t\t}\n\t}\n\treturn result\n}", "func (t *Transcript) WhichExonIntersect(reg Coor) []int {\n\tresult := []int{}\n\tfor i, exon := range t.Exons {\n\t\tif exon.Intersect(reg) {\n\t\t\tresult = append(result, i)\n\t\t}\n\t}\n\treturn result\n}", "func (c *CovarStats) Intercept() float64 {\n\treturn c.yStats.Mean() - c.Slope()*c.xStats.Mean()\n}", "func getSlopeIntercept(p1 Point, p2 Point) (slope float64, intercept float64) {\n\tslope = (float64(p2.Y) - float64(p1.Y)) / (float64(p2.X) - float64(p1.X))\n\tintercept = float64(p1.Y) - slope*float64(p1.X)\n\n\treturn slope, intercept\n}", "func (cpu *Mos6502) ind() uint8 {\n\tlowByte := cpu.read(cpu.pc)\n\tcpu.pc++\n\n\thighByte := cpu.read(cpu.pc)\n\tcpu.pc++\n\n\tpointer := (word(highByte) << 8) | word(lowByte)\n\n\tvar a word\n\tif lowByte == 0xff {\n\t\ta = (word(cpu.read(pointer&0xFF00)) << 8) | 
word(cpu.read(pointer))\n\t} else {\n\t\ta = (word(cpu.read(pointer+1)) << 8) | word(cpu.read(pointer))\n\t}\n\tcpu.addressAbsolute = a\n\n\treturn 0\n}", "func SlopeInd(m, xc, yc, xlen float64, lbl string, flip, xlog, ylog bool, args, argsLbl *A) {\n\tif args == nil {\n\t\targs = &A{C: \"k\"}\n\t}\n\targs.NoClip = true\n\tl := 0.5 * xlen\n\tx := []float64{xc - l, xc + l, xc + l, xc - l}\n\ty := []float64{yc - m*l, yc - m*l, yc + m*l, yc - m*l}\n\tif flip {\n\t\tx[1] = xc - l\n\t\ty[1] = yc + m*l\n\t}\n\tdx, dy := x[2]-x[0], y[2]-y[0]\n\td := 0.03 * math.Sqrt(dx*dx+dy*dy)\n\txm := xc - l - d\n\txp := xc + l + d\n\tym := yc + m*l - d\n\typ := yc + m*l + d\n\tyr := yc - m*l + d\n\tys := yc - m*l - d\n\tif xlog {\n\t\tfor i := 0; i < 4; i++ {\n\t\t\tx[i] = math.Pow(10.0, x[i])\n\t\t}\n\t\txc = math.Pow(10.0, xc)\n\t\txm = math.Pow(10.0, xm)\n\t\txp = math.Pow(10.0, xp)\n\t}\n\tif ylog {\n\t\tfor i := 0; i < 4; i++ {\n\t\t\ty[i] = math.Pow(10.0, y[i])\n\t\t}\n\t\tyc = math.Pow(10.0, yc)\n\t\tym = math.Pow(10.0, ym)\n\t\typ = math.Pow(10.0, yp)\n\t\tyr = math.Pow(10.0, yr)\n\t\tys = math.Pow(10.0, ys)\n\t}\n\tPlot(x, y, args)\n\tif lbl != \"\" {\n\t\tif argsLbl == nil {\n\t\t\targsLbl = &A{C: \"k\", Fsz: 6}\n\t\t}\n\t\targsLbl.NoClip = true\n\t\tif flip {\n\t\t\targsLbl.Ha = \"center\"\n\t\t\tif m < 0 {\n\t\t\t\targsLbl.Va = \"top\"\n\t\t\t\tText(xc, ym, \"1\", argsLbl)\n\t\t\t} else {\n\t\t\t\targsLbl.Va = \"bottom\"\n\t\t\t\tText(xc, yp, \"1\", argsLbl)\n\t\t\t}\n\t\t\targsLbl.Ha = \"right\"\n\t\t\targsLbl.Va = \"center\"\n\t\t\tText(xm, yc, lbl, argsLbl)\n\t\t} else {\n\t\t\targsLbl.Ha = \"center\"\n\t\t\tif m < 0 {\n\t\t\t\targsLbl.Va = \"bottom\"\n\t\t\t\tText(xc, yr, \"1\", argsLbl)\n\t\t\t} else {\n\t\t\t\targsLbl.Va = \"top\"\n\t\t\t\tText(xc, ys, \"1\", argsLbl)\n\t\t\t}\n\t\t\targsLbl.Ha = \"left\"\n\t\t\targsLbl.Va = \"center\"\n\t\t\tText(xp, yc, lbl, argsLbl)\n\t\t}\n\t}\n}", "func (xye *XYextentSearch) Intersect(ext [4]float64) []int {\n\tx0 := 
slice.GetIndxFloat64(xye.m[0].Val, ext[0])\n\tx1 := slice.GetIndxFloat64(xye.m[0].Val, ext[1])\n\ty0 := slice.GetIndxFloat64(xye.m[2].Val, ext[2])\n\ty1 := slice.GetIndxFloat64(xye.m[2].Val, ext[3])\n\treturn slice.Intersect(xye.m[0].Indx[x0:x1], xye.m[2].Indx[y0:y1])\n}", "func getLinesOfSight(grid *Grid, rowIdx int, colIdx int) ([]int, []int, []int, []int) {\n\twest := utils.Reversed(grid.Trees[rowIdx][0:colIdx])\n\teast := grid.Trees[rowIdx][colIdx+1:]\n\tnorth := []int{}\n\tfor _, row := range grid.Trees[:rowIdx] {\n\t\tnorth = append(north, row[colIdx])\n\t}\n\tnorth = utils.Reversed(north)\n\tsouth := []int{}\n\tfor _, row := range grid.Trees[rowIdx+1:] {\n\t\tsouth = append(south, row[colIdx])\n\t}\n\treturn north, south, east, west\n}", "func PSRLO(i, x operand.Op) { ctx.PSRLO(i, x) }", "func (s *lineSegment) overlapsWith(o *lineSegment) *coordinate {\n\n\t// If both are in the same orientation, we have division by zero.\n\tif s.vertical && o.vertical || !s.vertical && !o.vertical {\n\t\treturn nil\n\t}\n\t// Create a candidate for overlapping\n\t// https://math.stackexchange.com/questions/375083/given-coordinates-of-beginning-and-end-of-two-intersecting-line-segments-how-do\n\tx := -1 * ((s.xStart-s.xEnd)*(o.xStart*o.yEnd-o.xEnd*o.yStart) - (o.xEnd-o.xStart)*(s.xEnd*s.yStart-s.xStart*s.yEnd)) / ((o.yStart-o.yEnd)*(s.xStart-s.xEnd) - (o.xEnd-o.xStart)*(s.yEnd-s.yStart))\n\ty := -1 * (o.xStart*o.yEnd*s.yStart - o.xStart*o.yEnd*s.yEnd - o.xEnd*o.yStart*s.yStart + o.xEnd*o.yStart*s.yEnd - o.yStart*s.xStart*s.yEnd + o.yStart*s.xEnd*s.yStart + o.yEnd*s.xStart*s.yEnd - o.yEnd*s.xEnd*s.yStart) / (-1*o.xStart*s.yStart + o.xStart*s.yEnd + o.xEnd*s.yStart - o.xEnd*s.yEnd + o.yStart*s.xStart - o.yStart*s.xEnd - o.yEnd*s.xStart + o.yEnd*s.xEnd)\n\t// we never match on (0,0)\n\tif x == 0 && y == 0 {\n\t\treturn nil\n\t}\n\ttest := coordinate{\n\t\tx: x,\n\t\ty: y,\n\t}\n\tif s.includesCoordinate(&test) && o.includesCoordinate(&test) {\n\t\treturn &test\n\t}\n\treturn 
nil\n}", "func spanOf(at positioner) posSpan {\n\tswitch x := at.(type) {\n\tcase nil:\n\t\tpanic(\"nil positioner\")\n\tcase posSpan:\n\t\treturn x\n\tcase ast.Node:\n\t\tpos := x.Pos()\n\t\treturn posSpan{pos, pos, x.End()}\n\tcase *operand:\n\t\tif x.expr != nil {\n\t\t\tpos := x.Pos()\n\t\t\treturn posSpan{pos, pos, x.expr.End()}\n\t\t}\n\t\treturn posSpan{nopos, nopos, nopos}\n\tdefault:\n\t\tpos := at.Pos()\n\t\treturn posSpan{pos, pos, pos}\n\t}\n}", "func (gsfs *GoSourceFileStruct) getLineFromOffsets(sOfst, eOfst int) (lStart, lEnd int) {\n\tfor lineNb, lineIdxs := range gsfs.linesIndexes {\n\t\tswitch {\n\t\tcase sOfst >= lineIdxs[0] && sOfst <= lineIdxs[1]:\n\t\t\tlStart = lineNb\n\t\t\tif eOfst <= lineIdxs[1] { // only one line\n\t\t\t\tlEnd = lineNb\n\t\t\t\treturn\n\t\t\t}\n\t\tcase eOfst >= lineIdxs[0] && eOfst <= lineIdxs[1]:\n\t\t\tlEnd = lineNb\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}", "func (a *ALU) InstrXCHInd(R1 bool) {\n\tvar regLoc uint8\n\tvar loc uint8\n\tif R1 {\n\t\tregLoc = a.regAddr(1)\n\t} else {\n\t\tregLoc = a.regAddr(0)\n\t}\n\tloc = a.InternalRAM[regLoc]\n\ta.Accum, a.InternalRAM[loc] = a.InternalRAM[loc], a.Accum\n}", "func startIdx[E any](haystack, needle []E) int {\n\tp := &needle[0]\n\tfor i := range haystack {\n\t\tif p == &haystack[i] {\n\t\t\treturn i\n\t\t}\n\t}\n\t// TODO: what if the overlap is by a non-integral number of Es?\n\tpanic(\"needle not found\")\n}", "func (i *IE) OffendingIE() (uint16, error) {\n\tif i.Type != OffendingIE {\n\t\treturn 0, &InvalidTypeError{Type: i.Type}\n\t}\n\n\tif len(i.Payload) < 2 {\n\t\treturn 0, io.ErrUnexpectedEOF\n\t}\n\n\treturn binary.BigEndian.Uint16(i.Payload[0:2]), nil\n}", "func GetOverlappedIds(c *gin.Context) {}", "func getCousinIndex(dataLength, leafIndex int) (cousinIndex int) {\n\tif leafIndex == 0 {\n\t\t// beginning so return cousin of sibling as leafIndex\n\t\treturn 2\n\t}\n\tend := dataLength - 1\n\tif leafIndex == end {\n\t\t// at end of tree so return cousin of sibling 
as leafIndex\n\t\tif leafIndex%2 == 0 {\n\t\t\t// if even at the end, cousin is one to the left\n\t\t\treturn end - 1\n\t\t}\n\t\t// if odd leafIndex at the end, cousin is two to the left\n\t\treturn end - 2\n\t}\n\tif leafIndex%2 == 1 {\n\t\t// odd leafIndex so cousin to the right\n\t\treturn leafIndex + 1\n\t} else {\n\t\t// even leafIndex so cousin to the left\n\t\treturn leafIndex - 1\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
inRange returns whether true if a >= val = val <= a.
func inRange(val, a, b int) bool { return val >= a && val <= b || val >= b && val <= a }
[ "func InRange[T comdef.IntOrFloat](val, min, max T) bool {\n\treturn val >= min && val <= max\n}", "func InRange(val, min, max float64) float64 {\n\tif val < min {\n\t\treturn min\n\t} else if val > max {\n\t\treturn max\n\t}\n\treturn val\n}", "func InRange(value, left, right float64) bool {\n\tif left > right {\n\t\tleft, right = right, left\n\t}\n\treturn value >= left && value <= right\n}", "func inBound(min, v, max int) bool {\n\treturn v >= min && v < max\n}", "func within(value, low, high int) bool {\n\treturn value >= low && value <= high\n}", "func inInterval(a int, min, max int) bool {\n\treturn (min <= a) && (a < max)\n}", "func ValueIsInRange(candidate int, lowerBound int, upperBound int) bool {\n\treturn lowerBound <= candidate && candidate < upperBound\n}", "func IsInRange(input int, start int, end int) bool {\n\treturn input >= start && input <= end\n}", "func (rng Range) InRange(ip *net.IP) bool {\n\tif bytes.Compare(*ip, rng.start) >= 0 && bytes.Compare(*ip, rng.end) <= 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func isOnRange(x, a, b float64) (onRange bool) {\n\tif a <= b {\n\t\tif a <= x && x <= b {\n\t\t\tonRange = true\n\t\t}\n\t} else {\n\t\tif b <= x && x <= a {\n\t\t\tonRange = true\n\t\t}\n\t}\n\treturn onRange\n}", "func inRange(r ipRange, ipAddress net.IP) bool {\n\t// strcmp type byte comparison\n\tif bytes.Compare(ipAddress, r.start) >= 0 && bytes.Compare(ipAddress, r.end) < 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func InRange(n int, r []int) bool {\n\treturn len(r) == 2 && (r[0] <= n && n <= r[1])\n}", "func (p Point) In(rg Range) bool {\n\treturn p.X >= rg.Min.X && p.X < rg.Max.X && p.Y >= rg.Min.Y && p.Y < rg.Max.Y\n}", "func InUintRange[T comdef.Uint](val, min, max T) bool {\n\tif max == 0 {\n\t\treturn val >= min\n\t}\n\treturn val >= min && val <= max\n}", "func (i *Number) IsInRange(start, end Number) bool {\n\treturn i.value <= end.value && i.value >= start.value\n}", "func (r *SplineRange) InBounds(x float64) 
bool {\n\tmin, max := r.Bounds()\n\treturn min <= x && max >= x\n}", "func (s StatusCode) In(r StatusCodeRange) bool {\n\treturn r.Min <= s && s <= r.Max\n}", "func between(start, elt, end *big.Int, inclusive bool) bool {\n\tif end.Cmp(start) > 0 {\n\t\treturn (start.Cmp(elt) < 0 && elt.Cmp(end) < 0) || (inclusive && elt.Cmp(end) == 0)\n\t}\n\treturn start.Cmp(elt) < 0 || elt.Cmp(end) < 0 || (inclusive && elt.Cmp(end) == 0)\n}", "func inRangeEntries(v int, entries []*RangeEntry) bool {\n\tfor _, re := range entries {\n\t\tif re.StartVendorID <= v && v <= re.EndVendorID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run executes the pull command.
func (c *PullCommand) Run(args []string) int { cmdFlags := flag.NewFlagSet("pull", flag.ContinueOnError) cmdFlags.Usage = func() { c.UI.Output(c.Help()) } config := c.Config cmdFlags.StringVar(&config.Secret, "secret", config.Secret, "") cmdFlags.StringVar(&config.TargetDirectory, "target", config.TargetDirectory, "") cmdFlags.StringVar(&config.Encoding, "encoding", config.Encoding, "") cmdFlags.StringVar(&config.Format, "format", config.Format, "") req := new(phrase.DownloadRequest) cmdFlags.StringVar(&req.Tag, "tag", "", "") var updatedSince string cmdFlags.StringVar(&updatedSince, "updated-since", "", "") cmdFlags.BoolVar(&req.ConvertEmoji, "convert-emoji", false, "") cmdFlags.BoolVar(&req.SkipUnverifiedTranslations, "skip-unverified-translations", false, "") cmdFlags.BoolVar(&req.IncludeEmptyTranslations, "include-empty-translations", false, "") if err := cmdFlags.Parse(args); err != nil { return 1 } if updatedSince != "" { var err error req.UpdatedSince, err = time.Parse(timeFormat, updatedSince) if err != nil { c.UI.Error(fmt.Sprintf("Error parsing updated-since (%s), format should be YYYYMMDDHHMMSS", updatedSince)) return 1 } } if config.Format == "" { config.Format = defaultDownloadFormat } c.API.AuthToken = config.Secret req.Encoding = config.Encoding req.Format = config.Format if err := config.Valid(); err != nil { c.UI.Error(err.Error()) return 1 } err := c.fetch(req, cmdFlags.Args()) if err != nil { c.UI.Error(fmt.Sprintf("Error encountered fetching the locales:\n\t%s", err.Error())) return 1 } return 0 }
[ "func (p *PullCommand) runPull(args []string) error {\n\treturn pullMissingImage(context.Background(), p.cli.Client(), args[0], true)\n}", "func (config *ReleaseCommandConfig) Run() error {\n\n\tgit, err := gitpkg.GetGit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = release(git)\n\n\treturn err\n}", "func executePull(location string) {\n\tfmt.Println(\"Pulling from \" + location + \" ...\")\n}", "func (c *PushCommand) Run(args []string) int {\n\n\treturn 0\n}", "func doPull() (err error) {\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", fmt.Sprintf(\"cd %s; git pull;\", workdir))\n\tlog.Printf(\"run it: cd %s; git pull;\", workdir)\n\terr = cmd.Run()\n\treturn\n}", "func (c *config) pull(remote string, branch string) (output string, err error) {\n\tlog.Printf(\"pulling: %v/%v\", remote, branch)\n\n\tdefaultCommand := []string{\"pull\", remote, branch}\n\n\treturn c.command(defaultCommand...)\n}", "func (p *Promoter) Run() (err error) {\n\tspec, err := p.sdAPI.GetCommand(p.smallSpec)\n\tif err != nil {\n\t\tfmt.Printf(\"%v does not exist yet\\n\", p.tag)\n\t} else if spec.Version == p.targetVersion {\n\t\tfmt.Printf(\"%v has been already tagged with %v\\n\", spec.Version, p.tag)\n\t\treturn\n\t} else {\n\t\tfmt.Printf(\"Removing %v from %v\\n\", spec.Version, p.tag)\n\t}\n\tres, err := p.sdAPI.TagCommand(p.smallSpec, p.targetVersion, p.tag)\n\tif err != nil {\n\t\tfmt.Println(\"Promoting is aborted\")\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Promoting %v to %v\\n\", res.Version, res.Tag)\n\treturn\n}", "func (cmd *DownCmd) Run(cobraCmd *cobra.Command, args []string) {\n\tlog.StartFileLogging()\n\n\tconfig := configutil.GetConfig(false)\n\n\treleaseName := *config.DevSpace.Release.Name\n\tkubectl, err := kubectl.NewClient()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to create new kubectl client: %s\", err.Error())\n\t}\n\n\tclient, err := helmClient.NewClient(kubectl, false)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to initialize helm client: %s\", 
err.Error())\n\t}\n\n\tlog.StartWait(\"Deleting release \" + releaseName)\n\tres, err := client.DeleteRelease(releaseName, true)\n\tlog.StopWait()\n\n\tif res != nil && res.Info != \"\" {\n\t\tlog.Donef(\"Successfully deleted release %s: %s\", releaseName, res.Info)\n\t} else if err != nil {\n\t\tlog.Donef(\"Error deleting release %s: %s\", releaseName, err.Error())\n\t} else {\n\t\tlog.Donef(\"Successfully deleted release %s\", releaseName)\n\t}\n}", "func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate options\")\n\t}\n\n\tpr, err := o.discoverPullRequest()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to discover pull request\")\n\t}\n\n\tlog.Logger().Infof(\"found PullRequest %s\", pr.Link)\n\n\tpreview, _, err := previews.GetOrCreatePreview(o.PreviewClient, o.Namespace, pr, o.PreviewHelmfile)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to upsert the Preview resource in namespace %s\", o.Namespace)\n\t}\n\tlog.Logger().Infof(\"upserted preview %s\", preview.Name)\n\n\treturn o.helmfileSyncPreview(pr, preview)\n}", "func Run(cmd *cobra.Command, args []string) {\n\tvar repo *dbt.DBTRepoServer\n\n\tif configFile != \"\" {\n\t\tr, err := dbt.NewRepoServer(configFile)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to create reposerver from file: %s\", err)\n\t\t}\n\n\t\trepo = r\n\n\t} else {\n\t\trepo = &dbt.DBTRepoServer{\n\t\t\tAddress: address,\n\t\t\tPort: port,\n\t\t\tServerRoot: serverRoot,\n\t\t}\n\t}\n\n\tif repo == nil {\n\t\tlog.Fatalf(\"Failed to initialize reposerver object. 
Cannot continue.\")\n\t}\n\n\terr := repo.RunRepoServer()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error running server: %s\", err)\n\t}\n}", "func pullCmd(c *cli.Context) error {\n\tvar fqRegistries []string\n\n\targs := c.Args()\n\tif len(args) == 0 {\n\t\tlogrus.Errorf(\"an image name must be specified\")\n\t\treturn nil\n\t}\n\tif len(args) > 1 {\n\t\tlogrus.Errorf(\"too many arguments. Requires exactly 1\")\n\t\treturn nil\n\t}\n\timage := args[0]\n\tsrcRef, err := alltransports.ParseImageName(image)\n\tif err != nil {\n\t\tfqRegistries, err = getRegistriesToTry(image)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t} else {\n\t\tfqRegistries = append(fqRegistries, srcRef.DockerReference().String())\n\t}\n\truntime, err := getRuntime(c)\n\tdefer runtime.Shutdown(false)\n\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"could not create runtime\")\n\t}\n\tfor _, fqname := range fqRegistries {\n\t\tfmt.Printf(\"Trying to pull %s...\", fqname)\n\t\tif err := runtime.PullImage(fqname, c.Bool(\"all-tags\"), os.Stdout); err != nil {\n\t\t\tfmt.Printf(\" Failed\\n\")\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.Errorf(\"error pulling image from %q\", image)\n}", "func pullRepo() {\n\tcmd := []string{\"git\", \"pull\", \"--prune\", \"--quiet\"}\n\tcmds := strings.Join(cmd, \" \")\n\tlog.Printf(\"- %s\", cmds)\n\tc := exec.Command(cmd[0], cmd[1:]...)\n\tstart := time.Now()\n\tout, err := c.CombinedOutput()\n\tduration := time.Since(start)\n\texit := 0\n\tif err != nil {\n\t\texit = -1\n\t\tif len(out) == 0 {\n\t\t\tout = []byte(\"<failure>\\n\" + err.Error() + \"\\n\")\n\t\t}\n\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\texit = status.ExitStatus()\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"$ %s (exit:%d in %s)\\n%s\", cmds, exit, roundTime(duration), normalizeUTF8(out))\n}", "func (p *pullExecutor) Execute(ctx context.Context) error {\n\tif len(p.Pull.Packages) != 0 {\n\t\terr 
:= p.pullPackages(p.Pull.Packages)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\tif len(p.Pull.Apps) != 0 {\n\t\terr := p.pullApps(p.Pull.Apps)\n\t\tif err != nil {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t}\n\terr := p.pullConfiguredPackages()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\terr = p.applyPackageLabels()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\terr = p.unpackPackages()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\t// after all pulling and unpacking has been done, set proper ownership\n\t// on the data dir\n\tstateDir, err := state.GetStateDir()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\terr = utils.Chown(filepath.Join(stateDir, defaults.LocalDir),\n\t\tp.ServiceUser.UID, p.ServiceUser.GID)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}", "func (c *updateCmd) Run(_ *buildChild) error {\n\treturn nil\n}", "func Fetch(templateDir string) error {\n\t_, err := util.Run(templateDir, \"git\", \"pull\")\n\treturn err\n}", "func (s Step) PullCommand() []string {\n\treturn []string{\"pull\", s.ImageName()}\n}", "func (c *pushCmd) Run(child *pushChild, logger logging.Logger) error {\n\tlogger = logger.WithValues(\"tag\", child.tag)\n\ttag, err := name.NewTag(child.tag)\n\tif err != nil {\n\t\tlogger.Debug(\"Failed to create tag for package\", \"error\", err)\n\t\treturn err\n\t}\n\n\t// If package is not defined, attempt to find single package in current\n\t// directory.\n\tif c.Package == \"\" {\n\t\tlogger.Debug(\"Trying to find package in current directory\")\n\t\twd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\tlogger.Debug(\"Failed to find package in directory\", \"error\", errors.Wrap(err, errGetwd))\n\t\t\treturn errors.Wrap(err, errGetwd)\n\t\t}\n\t\tpath, err := xpkg.FindXpkgInDir(child.fs, wd)\n\t\tif err != nil {\n\t\t\tlogger.Debug(\"Failed to find package in directory\", \"error\", errors.Wrap(err, errFindPackageinWd))\n\t\t\treturn errors.Wrap(err, 
errFindPackageinWd)\n\t\t}\n\t\tc.Package = path\n\t\tlogger.Debug(\"Found package in directory\", \"path\", path)\n\t}\n\timg, err := tarball.ImageFromPath(c.Package, nil)\n\tif err != nil {\n\t\tlogger.Debug(\"Failed to create image from package tarball\", \"error\", err)\n\t\treturn err\n\t}\n\tif err := remote.Write(tag, img, remote.WithAuthFromKeychain(authn.DefaultKeychain)); err != nil {\n\t\tlogger.Debug(\"Failed to push created image to remote location\", \"error\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (cmd *DownloadFirmwareCommand) Run(c *client.Client, args []string) error {\n\tvar path string\n\tif len(args) > 0 {\n\t\tpath = args[0]\n\t} else {\n\t\tpath = fmt.Sprintf(\"/firmware/%v/download\", cmd.FirmwareID)\n\t}\n\tlogger := goa.NewLogger(log.New(os.Stderr, \"\", log.LstdFlags))\n\tctx := goa.WithLogger(context.Background(), logger)\n\tresp, err := c.DownloadFirmware(ctx, path)\n\tif err != nil {\n\t\tgoa.LogError(ctx, \"failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tgoaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)\n\treturn nil\n}", "func (p *Publisher) Run() error {\n\tspecResponse, err := p.sdAPI.PostCommand(p.commandSpec)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Post failed: %v\", err)\n\t}\n\n\terr = p.tagCommand(specResponse)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Tag failed: %v\", err)\n\t}\n\n\t// Published successfully\n\t// Show version number of command published by sd-cmd\n\tfmt.Println(specResponse.Version)\n\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Help displays available options for the pull command.
func (c *PullCommand) Help() string { helpText := ` Usage: phrase pull [options] [LOCALE] Download the translation files in the current project. Options: --format=yml See documentation for list of allowed formats --target=./phrase/locales Target folder to store locale files --tag=foo Limit results to a given tag instead of all translations --updated-since=YYYYMMDDHHMMSS Limit results to translations updated after the given date (UTC) --include-empty-translations Include empty translations in the result --convert-emoji Convert Emoji symbols --encoding=utf-8 Convert .strings or .properties with alternate encoding --skip-unverified-translations Skip unverified translations in the result --secret=YOUR_AUTH_TOKEN The Auth Token to use for this operation instead of the saved one (optional) ` return strings.TrimSpace(helpText) }
[ "func (d *downloadCommand) Help() string {\n\thelp := `Usage: hashicorp-releases download <product> <version>`\n\treturn help\n}", "func help() {\n\tlog.Infoln(\"#: the number of the peer you want to connect to\")\n\tlog.Infoln(\"r: refresh peer list\")\n\tlog.Infoln(\"q: quit pcp\")\n\tlog.Infoln(\"?: this help message\")\n}", "func help() {\n\tfmt.Println(`\n usage:\n hping [-c count][-t timeout][-m method][-d data] url\n\n options:\t\t \n -c count Send 'count' requests (default: 4)\n -t timeout Specifies a time limit for requests in second (default is 2) \n -m method HTTP methods: GET/POST/HEAD (default: HEAD)\n -d data Sending the given data (text/json) (default: \"mylg\")\n\t`)\n}", "func commandHelp(args string, ui plugin.UI) {\n\tif args == \"\" {\n\t\thelp := usage(false)\n\t\thelp = help + `\n : Clear focus/ignore/hide/tagfocus/tagignore\n\n type \"help <cmd|option>\" for more information\n`\n\n\t\tui.Print(help)\n\t\treturn\n\t}\n\n\tif c := pprofCommands[args]; c != nil {\n\t\tui.Print(c.help(args))\n\t\treturn\n\t}\n\n\tif help, ok := configHelp[args]; ok {\n\t\tui.Print(help + \"\\n\")\n\t\treturn\n\t}\n\n\tui.PrintErr(\"Unknown command: \" + args)\n}", "func (get *BaseCommandGetOperation) Help() string {\n\treturn \"\"\n}", "func help(cmd string) {\n\thelpDetails := map[string]string{\n\t\t\"ls\": \"ls path [path...] 
- prints list of keys from each path\",\n\t\t\"cat\": \"cat path key - prints the value\",\n\t}\n\tif cmd == \"\" {\n\t\tfmt.Println(\"Available commands are:\")\n\t\tfor cmd, details := range helpDetails {\n\t\t\tfmt.Println(cmd, \":\", details)\n\t\t}\n\t} else {\n\t\tfmt.Println(helpDetails[cmd])\n\t}\n}", "func GetHelp() string {\n\tmsg := \"List of available commands\\n /status - returns validator status, voting power, current block height \" +\n\t\t\"and network block height\\n /peers - returns number of connected peers\\n /node - return status of caught-up\\n\" +\n\t\t\"/balance - returns the current balance of your account \\n /list - list out the available commands\"\n\n\treturn msg\n}", "func (c *PullCommand) Synopsis() string {\n\treturn \"Download the translation files in the current project\"\n}", "func (up *BaseProvisionUpOperation) Help() string {\n\treturn \"\"\n}", "func (c *GetCommand) Help() string {\n\thelpText := `\nUsage :\n\twikible get [options]\n\n\tGet the wiki pages to code.\n\nOptions:\n\t-i parent id\n\t-a wiki address\n`\n\treturn strings.TrimSpace(helpText)\n}", "func (cli *CLI) Help(base Command, name string, args []string) (string, error) {\n\tb := &bytes.Buffer{}\n\terr := cli.printHelp(NewOutput(b), base, name, args)\n\treturn b.String(), err\n}", "func (vm *VM) Help(cmd *cobra.Command, args []string) {\n\n\tcli := ui.NewCLI(vm.Config)\n\tversionMap := map[string]string{\"ReleaseVersion\": ReleaseVersion, \"GitHash\": GitHash}\n\n\tif len(args) == 0 {\n\t\tfmt.Println(cli.Render(\"vmUsage\", versionMap))\n\t\treturn\n\t}\n\n\thelpType := strings.ToLower(args[0])\n\tswitch helpType {\n\tcase \"scanners\":\n\t\tfmt.Println(cli.Render(\"scannersUsage\", versionMap))\n\n\tcase \"agent-groups\":\n\t\tfmt.Print(cli.Render(\"agentGroupsUsage\", versionMap))\n\tcase \"agents\":\n\t\tfmt.Print(cli.Render(\"agentsUsage\", versionMap))\n\tcase \"export-vulns\":\n\t\tfmt.Print(cli.Render(\"ExportVulnsHelp\", 
versionMap))\n\n\tdefault:\n\t\tfmt.Println(cli.Render(\"vmUsage\", versionMap))\n\t}\n\n\treturn\n}", "func (p provision) Help() string {\n\treturn \"<start> Start the provision-api on 0.0.0.0:7000\"\n}", "func (c *Ping) Help() string {\n\treturn `Usage: PING [message] Returns PONG if no argument is provided, otherwise return a copy of the argument as a bulk.`\n}", "func DisplayCommandHelp(b *Brute, m *discordgo.MessageCreate, cmd *Command) {\n\tif cmd.HelpFunc != nil {\n\t\tcmd.HelpFunc(b, b.Session, m)\n\t\treturn\n\t}\n\n\tif cmd.HelpStr != \"\" {\n\t\t_, err := b.Session.ChannelMessageSend(m.ChannelID, cmd.HelpStr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to send message: %v\\n\", err)\n\t\t}\n\t\treturn\n\t}\n\n\t_, err := b.Session.ChannelMessageSend(m.ChannelID, fmt.Sprintf(\"No usage info defined for `%s`. Here - grab some beers while waiting for Mak to add them :beers:\", cmd.Cmd))\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to send message: %v\\n\", err)\n\t}\n}", "func Help() {\n\tlog.Println(\"\")\n\tlog.Println(\"Commands:\")\n\tlog.Println(\" Init : Create an empty deps.json\")\n\tlog.Println(\" Add [nickname] : Add a dependency (interactive)\")\n\tlog.Println(\" Install : Install all the dependencies listed in deps.json (default)\")\n\tlog.Println(\" Update [nickname] [branch] : Update [nickname] to use the latest commit in [branch]\")\n\tlog.Println(\" Self-Upgrade : Upgrade depman to the latest version on the master branch\")\n\tlog.Println(\" Help : Display this help\")\n\tlog.Println(\" Show-Frozen : Show dependencies as resolved to commit IDs\")\n\tlog.Println(\"\")\n\tlog.Println(\"Example: depman --verbose install\")\n\tlog.Println(\"\")\n\t//log.Println(\" freeze : For each dependency change tag and branch versions to commits (not yet implemented)\")\n\tlog.Println(\"Options:\")\n\tflag.PrintDefaults()\n}", "func help() {\r\n fmt.Printf(\"ORIGAMI\\n\")\r\n fmt.Printf(\"\\tA web app that checks the toner levels of printers at the 
Elizabethtown College campus.\\n\\n\")\r\n fmt.Printf(\"USAGE\\n\")\r\n fmt.Printf(\"\\tUsage: origami [-f filepath | -h]\\n\\n\")\r\n fmt.Printf(\"OPTIONS\\n\")\r\n fmt.Printf(\"\\t-f: specify the filepath of the config file (\\\"./origami.conf\\\" by default)\\n\")\r\n fmt.Printf(\"\\t-h: this menu\\n\\n\")\r\n fmt.Printf(\"AUTHOR\\n\")\r\n fmt.Printf(\"\\tRory Dudley (aka pinecat: https://github.com/pinecat/origamiv2)\\n\\n\")\r\n fmt.Printf(\"EOF\\n\")\r\n}", "func (p *plugin) cmdHelp(w irc.ResponseWriter, r *irc.Request, params cmd.ParamList) {\n\tproto.PrivMsg(w, r.Target, TextHelpDisplay, r.SenderName)\n}", "func (c *Config) getHelp() {\n\tcm := cmds.Command{\n\t\tName: \"help\",\n\t\tDescription: \"prints information about how to use pod\",\n\t\tEntrypoint: helpFunction,\n\t\tCommands: nil,\n\t}\n\t// first add all the options\n\tc.ForEach(func(ifc opt.Option) bool {\n\t\to := fmt.Sprintf(\"Parallelcoin Pod All-in-One Suite\\n\\n\")\n\t\tvar dt details\n\t\tswitch ii := ifc.(type) {\n\t\tcase *binary.Opt:\n\t\t\tdt = details{ii.GetMetadata().Name, ii.Option, ii.Description, fmt.Sprint(ii.Def), ii.Aliases,\n\t\t\t\tii.Documentation,\n\t\t\t}\n\t\tcase *list.Opt:\n\t\t\tdt = details{ii.GetMetadata().Name, ii.Option, ii.Description, fmt.Sprint(ii.Def), ii.Aliases,\n\t\t\t\tii.Documentation,\n\t\t\t}\n\t\tcase *float.Opt:\n\t\t\tdt = details{ii.GetMetadata().Name, ii.Option, ii.Description, fmt.Sprint(ii.Def), ii.Aliases,\n\t\t\t\tii.Documentation,\n\t\t\t}\n\t\tcase *integer.Opt:\n\t\t\tdt = details{ii.GetMetadata().Name, ii.Option, ii.Description, fmt.Sprint(ii.Def), ii.Aliases,\n\t\t\t\tii.Documentation,\n\t\t\t}\n\t\tcase *text.Opt:\n\t\t\tdt = details{ii.GetMetadata().Name, ii.Option, ii.Description, fmt.Sprint(ii.Def), ii.Aliases,\n\t\t\t\tii.Documentation,\n\t\t\t}\n\t\tcase *duration.Opt:\n\t\t\tdt = details{ii.GetMetadata().Name, ii.Option, ii.Description, fmt.Sprint(ii.Def), ii.Aliases,\n\t\t\t\tii.Documentation,\n\t\t\t}\n\t\t}\n\t\tcm.Commands = 
append(cm.Commands, cmds.Command{\n\t\t\tName: dt.option,\n\t\t\tDescription: dt.desc,\n\t\t\tEntrypoint: func(ifc interface{}) (e error) {\n\t\t\t\to += fmt.Sprintf(\"Help information about %s\\n\\n\\toption name:\\n\\t\\t%s\\n\\taliases:\\n\\t\\t%s\\n\\tdescription:\\n\\t\\t%s\\n\\tdefault:\\n\\t\\t%v\\n\",\n\t\t\t\t\tdt.name, dt.option, dt.aliases, dt.desc, dt.def,\n\t\t\t\t)\n\t\t\t\tif dt.documentation != \"\" {\n\t\t\t\t\to += \"\\tdocumentation:\\n\\t\\t\" + dt.documentation + \"\\n\\n\"\n\t\t\t\t}\n\t\t\t\tfmt.Fprint(os.Stderr, o)\n\t\t\t\treturn\n\t\t\t},\n\t\t\tCommands: nil,\n\t\t},\n\t\t)\n\t\treturn true\n\t},\n\t)\n\t// next add all the commands\n\tc.Commands.ForEach(func(cm cmds.Command) bool {\n\t\t\n\t\treturn true\n\t}, 0, 0,\n\t)\n\tc.Commands = append(c.Commands, cm)\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Synopsis displays a synopsis of the pull command.
func (c *PullCommand) Synopsis() string { return "Download the translation files in the current project" }
[ "func (c *PullCommand) Help() string {\n\thelpText := `\n\tUsage: phrase pull [options] [LOCALE]\n\n\t Download the translation files in the current project.\n\n\tOptions:\n\n --format=yml See documentation for list of allowed formats\n --target=./phrase/locales Target folder to store locale files\n --tag=foo Limit results to a given tag instead of all translations\n --updated-since=YYYYMMDDHHMMSS Limit results to translations updated after the given date (UTC)\n --include-empty-translations Include empty translations in the result\n --convert-emoji Convert Emoji symbols\n --encoding=utf-8 Convert .strings or .properties with alternate encoding\n --skip-unverified-translations Skip unverified translations in the result\n --secret=YOUR_AUTH_TOKEN The Auth Token to use for this operation instead of the saved one (optional)\n\t`\n\treturn strings.TrimSpace(helpText)\n}", "func pullExample() string {\n\treturn `$ pouch images\nIMAGE ID IMAGE NAME SIZE\nbbc3a0323522 docker.io/library/busybox:latest 703.14 KB\n$ pouch pull docker.io/library/redis:alpine\n$ pouch images\nIMAGE ID IMAGE NAME SIZE\nbbc3a0323522 docker.io/library/busybox:latest 703.14 KB\n0153c5db97e5 docker.io/library/redis:alpine 9.63 MB`\n}", "func (d *downloadCommand) Synopsis() string {\n\treturn \"Download a specific version of a product.\"\n}", "func CmdPullStory(c *cli.Context) {\n\n\tfrom := c.String(\"source\")\n\tsource, err := gitutil.LookupBranchSource(from, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Get repo instance\n\troot, _ := os.Getwd()\n\trepo, err := gitutil.GetRepo(root)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar remoteName string\n\n\t// Extract source's remote and branch names\n\tsources := strings.Split(source, \"/\")\n\tif len(sources) == 1 {\n\t\tremoteName = \"origin\"\n\t} else {\n\t\tremoteName = sources[0]\n\t}\n\n\t// Fetch from repo before pulling\n\tfmt.Printf(\"Fetching most recent with remote: `%s`\\n\", remoteName)\n\tif err = 
gitutil.Fetch(repo, remoteName); err != nil {\n\t\t// do not fail entire app even if fetch fails\n\t\tlog.Println(err)\n\t}\n\n\tfmt.Printf(\"Merging %s into local branch\\n\", source)\n\terr = gitutil.Pull(repo, source)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (d *downloadCommand) Help() string {\n\thelp := `Usage: hashicorp-releases download <product> <version>`\n\treturn help\n}", "func executePull(location string) {\n\tfmt.Println(\"Pulling from \" + location + \" ...\")\n}", "func (s Step) PullCommand() []string {\n\treturn []string{\"pull\", s.ImageName()}\n}", "func (c *SetupCommand) Synopsis() string {\n\treturn \"Utility to setup the elos command line interface\"\n}", "func Cmd(method, source string, args Options) ([]byte, error) {\n\treturn fetch.Cmd(fetch.Request{\n\t\tMethod: method,\n\t\tURL: fmt.Sprintf(\n\t\t\t\"http://api.pullword.com/%s.php?source=%s&param1=%d&param2=%d\",\n\t\t\tmethod,\n\t\t\tsource,\n\t\t\targs.Threshold,\n\t\t\targs.Debug,\n\t\t),\n\t})\n}", "func (c *Command) Synopsis() string {\n\treturn \"\"\n}", "func help() {\n\tfmt.Println(`\n usage:\n hping [-c count][-t timeout][-m method][-d data] url\n\n options:\t\t \n -c count Send 'count' requests (default: 4)\n -t timeout Specifies a time limit for requests in second (default is 2) \n -m method HTTP methods: GET/POST/HEAD (default: HEAD)\n -d data Sending the given data (text/json) (default: \"mylg\")\n\t`)\n}", "func (ShowCLI) Synopsis() string {\n\treturn \"Displays configured registration entries\"\n}", "func usage() {\n\tfmt.Println(\"Usage of ./client\")\n\tfmt.Println(\" -master=[master IP:Port] put [localfilename] [sdfsfilename]\")\n\tfmt.Println(\" -master=[master IP:Port] get [sdfsfilename] [localfilename]\")\n\tfmt.Println(\" -master=[master IP:Port] delete [sdfsfilename]\")\n\tfmt.Println(\" -master=[master IP:Port] ls [sdfsfilename]\")\n\tfmt.Println(\" -master=[master IP:Port] store\")\n\tfmt.Println(\" -master=[master IP:Port] get-versions 
[sdfsfilename] [num-versions] [localfilename]\")\n}", "func execSynopsis(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := doc.Synopsis(args[0].(string))\n\tp.Ret(1, ret)\n}", "func (c *PullCommand) Run(args []string) int {\n\tcmdFlags := flag.NewFlagSet(\"pull\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { c.UI.Output(c.Help()) }\n\n\tconfig := c.Config\n\n\tcmdFlags.StringVar(&config.Secret, \"secret\", config.Secret, \"\")\n\tcmdFlags.StringVar(&config.TargetDirectory, \"target\", config.TargetDirectory, \"\")\n\tcmdFlags.StringVar(&config.Encoding, \"encoding\", config.Encoding, \"\")\n\tcmdFlags.StringVar(&config.Format, \"format\", config.Format, \"\")\n\n\treq := new(phrase.DownloadRequest)\n\tcmdFlags.StringVar(&req.Tag, \"tag\", \"\", \"\")\n\tvar updatedSince string\n\tcmdFlags.StringVar(&updatedSince, \"updated-since\", \"\", \"\")\n\tcmdFlags.BoolVar(&req.ConvertEmoji, \"convert-emoji\", false, \"\")\n\tcmdFlags.BoolVar(&req.SkipUnverifiedTranslations, \"skip-unverified-translations\", false, \"\")\n\tcmdFlags.BoolVar(&req.IncludeEmptyTranslations, \"include-empty-translations\", false, \"\")\n\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tif updatedSince != \"\" {\n\t\tvar err error\n\t\treq.UpdatedSince, err = time.Parse(timeFormat, updatedSince)\n\t\tif err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"Error parsing updated-since (%s), format should be YYYYMMDDHHMMSS\", updatedSince))\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tif config.Format == \"\" {\n\t\tconfig.Format = defaultDownloadFormat\n\t}\n\n\tc.API.AuthToken = config.Secret\n\treq.Encoding = config.Encoding\n\treq.Format = config.Format\n\n\tif err := config.Valid(); err != nil {\n\t\tc.UI.Error(err.Error())\n\t\treturn 1\n\t}\n\n\terr := c.fetch(req, cmdFlags.Args())\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"Error encountered fetching the locales:\\n\\t%s\", err.Error()))\n\t\treturn 1\n\t}\n\treturn 0\n}", "func (p provision) Help() string {\n\treturn 
\"<start> Start the provision-api on 0.0.0.0:7000\"\n}", "func (p playbackAPI) Synopsis() string {\n\treturn \"Start the playback-api on 0.0.0.0:7001\"\n}", "func (i *ImageInspectCommand) example() string {\n\treturn `$ pouch image inspect docker.io/library/busybox\n{\n \"CreatedAt\": \"2017-12-21 04:30:57\",\n \"Digest\": \"sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0\",\n \"ID\": \"bbc3a0323522\",\n \"Name\": \"docker.io/library/busybox:latest\",\n \"Size\": 720019,\n \"Tag\": \"latest\"\n}`\n}", "func (c *config) pull(remote string, branch string) (output string, err error) {\n\tlog.Printf(\"pulling: %v/%v\", remote, branch)\n\n\tdefaultCommand := []string{\"pull\", remote, branch}\n\n\treturn c.command(defaultCommand...)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add depth to all children
func (fi *finalizeFileInfo) addProperties(depth int) { fi.depth = depth for _, e := range fi.children { e.parent = fi e.addProperties(depth + 1) } }
[ "func (l *Link) DepthUpdate() error {\n\tfor _, child := range l.Children {\n\t\tif l.Depth+1 < child.Depth {\n\t\t\tchild.Depth = l.Depth + 1\n\t\t\tchild.DepthUpdate()\n\t\t}\n\t}\n\treturn nil\n}", "func updateDomTreeRecursive(element *Element, path []int) {\n\telement.Path = make([]int, len(path))\n\tcopy(element.Path, path)\n\n\t// TODO - handle componentmap update\n\t// if element.Component != nil {\n\t// \tcomponentMap[element.Component] = element\n\t// }\n\n\tfor i := 0; i < len(element.Children); i++ {\n\t\tchildPath := append(path, i)\n\t\tupdateDomTreeRecursive(&element.Children[i], childPath)\n\t}\n}", "func fixFields(n, parent *node, depth int) {\n\tn.parent = parent\n\tn.depth = depth\n\tfor _, c := range n.children {\n\t\tfixFields(c, n, depth+1)\n\t}\n}", "func AppendChildNodes(node StateNode, depth int, quit chan struct{}) StateNode {\n\tselect {\n\t\tcase <-quit:\n\t\t\treturn node\n\t\tdefault:\n\t\t\tif depth >= 0 {\n\t\t\t\tnode.Childs = []StateNode{}\n\t\t\t\tpiecesList := state.GetRemainingPiecesListFromState(node.State)\n\t\t\t\tboxList := grid.GetEmptyBoxes(node.State.Grid)\n\t\t\t\tif len(piecesList) > 0 && len(boxList) > 0 {\n\t\t\t\t\tfor j := 0; j < len(piecesList); j++ {\n\t\t\t\t\t\tif node.State.Piece == 0 {\n\t\t\t\t\t\t\tnextState := state.CopyState(node.State)\n\t\t\t\t\t\t\tnextState.Piece = piecesList[j]\n\t\t\t\t\t\t\tchildNode := InitNode(nextState)\n\t\t\t\t\t\t\tnode.Childs = append(node.Childs, AppendChildNodes(childNode, depth-1, quit))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tfor i := 0; i < len(boxList); i++ {\n\t\t\t\t\t\t\t\tnextState := state.CopyState(node.State)\n\t\t\t\t\t\t\t\tnextState.Grid[boxList[i].Y][boxList[i].X] = node.State.Piece\n\t\t\t\t\t\t\t\tnextState.Piece = piecesList[j]\n\t\t\t\t\t\t\t\tchildNode := InitNode(nextState)\n\t\t\t\t\t\t\t\tnode.Childs = append(node.Childs, AppendChildNodes(childNode, depth-1, quit ))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn 
node\n\t}\n}", "func (r *Registry) SetRecursiveDepth(count int) {\n\tr.recursiveDepth = count\n}", "func (n Node) addChild(location []string, ref TreeReference, depth int64) {\n\tif innerNode, ok := n.Children[location[0]]; ok {\n\t\tinnerNode.addChild(location[1:], ref, depth+1)\n\t} else {\n\t\tn.Children[location[0]] = Node{\n\t\t\tReference: ref,\n\t\t\tChildren: make(Tree),\n\t\t\tDepth: depth,\n\t\t}\n\t}\n}", "func depth(node *Node, k int) int {\n\tif node.GetParent() == nil {\n\t\treturn k\n\t}\n\treturn depth(node.GetParent(), k+1)\n}", "func (t *ASCIITree) Add(children ...*ASCIITree) {\n\tfor _, child := range children {\n\t\tchild.Parent = t\n\t}\n\tt.children = append(t.children, children...)\n}", "func (o *GetHardwaresParams) SetDepth(depth *string) {\n\to.Depth = depth\n}", "func (mParams *EncodingMatrixLiteral) Depth(actual bool) (depth int) {\n\tif actual {\n\t\tdepth = len(mParams.ScalingFactor)\n\t} else {\n\t\tfor i := range mParams.ScalingFactor {\n\t\t\tfor range mParams.ScalingFactor[i] {\n\t\t\t\tdepth++\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (o *Options) MaxDepth() int { return o.maxDepth }", "func recursiveAddChildrenCapSum(tree *BlockTree, hash *wire.Hash, cap *big.Int) {\n\tfor _, childNode := range tree.children[*hash] {\n\t\tchildNode.CapSum.Add(childNode.CapSum, cap)\n\t\trecursiveAddChildrenCapSum(tree, childNode.Hash, cap)\n\t}\n}", "func (tree Tree) add(path string) int {\n\t// fmt.Println(\"adding \", path, \"to\", tree)\n\tcomponents := strings.Split(path[1:], \"/\", -1)\n\tadditions := 0\n\tpos := tree\n\tfor _, c := range components {\n\t\tchild, ok := pos[c]\n\t\tif !ok {\n\t\t\tchild = newTree()\n\t\t\tpos[c] = child\n\t\t\tadditions++\n\t\t}\n\t\tpos = child\n\t}\n\t// fmt.Printf(\"add %s -> %#v : %d\\n\", path, components, additions)\n\treturn additions\n}", "func walkTree2(node BTNode, level int) string {\n\n\tconst FMTGPARENT = `{\"type\":\"%s\",\"name\":\"%s\",\"status\":%d, \"n\":%d, \"children\":[%s]}`\n\tconst 
FMTGLEAF = `{\"type\":\"%s\",\"name\":\"%s\", \"status\":%d, \"n\":%d}`\n\n\tinfo := node.Info()\n\tstate := node.State()\n\n\tchildren := \"\"\n\tvar a []string\n\n\tswitch node.(type) {\n\tcase *SequenceNode:\n\t\tnodes := node.(*SequenceNode).Nodes\n\t\ta = make([]string, len(nodes))\n\t\tfor i, x := range nodes {\n\t\t\ta[i] = walkTree2(x, level+1)\n\t\t}\n\tcase *SelectorNode:\n\t\tnodes := node.(*SelectorNode).Nodes\n\t\ta = make([]string, len(nodes))\n\t\tfor i, x := range nodes {\n\t\t\ta[i] = walkTree2(x, level+1)\n\t\t}\n\tcase *DecoratorNode:\n\t\ta = make([]string, 1)\n\t\ta[0] = walkTree2(node.(*DecoratorNode).Node, level+1)\n\tcase *RootNode:\n\t\ta = make([]string, 1)\n\t\ta[0] = walkTree2(node.(*RootNode).Node, level+1)\n\tdefault:\n\t}\n\n\tif len(a) > 0 {\n\t\tchildren = fmt.Sprintf(FMTGPARENT, info.Type, info.Label, state.Status, state.N, strings.Join(a, \",\"))\n\t} else {\n\t\tchildren = fmt.Sprintf(FMTGLEAF, info.Type, info.Label, state.Status, state.N)\n\t}\n\n\treturn children\n\n}", "func processChildren(theReader *dwarf.Reader, depth int, canSkip bool) {\n\n\n\n\t// Process the children\n\tif (canSkip) {\n\t\ttheReader.SkipChildren();\n\t} else {\n\t\tfor {\n\t\t\ttheChild := readNextEntry(theReader);\n\t\t\tif (theChild == nil || theChild.Tag == 0) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t\n\t\t\tprocessEntry(theReader, depth, theChild);\n\t\t}\n\t}\n}", "func (w *Walker) incrementActiveChildIndex() {\n\tif w.ActiveChildIndex()+1 <= w.ActiveNode().ChildTotal() {\n\t\tw.childIndex[w.currentDepth]++\n\t}\n}", "func trickleDepthInfo(node *h.FSNodeOverDag, maxlinks int) (depth int, repeatNumber int) {\n\tn := node.NumChildren()\n\n\tif n < maxlinks {\n\t\t// We didn't even added the initial `maxlinks` leaf nodes (`FillNodeLayer`).\n\t\treturn 0, 0\n\t}\n\n\tnonLeafChildren := n - maxlinks\n\t// The number of non-leaf child nodes added in `fillTrickleRec` (after\n\t// the `FillNodeLayer` call).\n\n\tdepth = nonLeafChildren/depthRepeat + 1\n\t// 
\"Deduplicate\" the added `depthRepeat` sub-graphs at each depth\n\t// (rounding it up since we may be on an unfinished depth with less\n\t// than `depthRepeat` sub-graphs).\n\n\trepeatNumber = nonLeafChildren % depthRepeat\n\t// What's left after taking full depths of `depthRepeat` sub-graphs\n\t// is the current `repeatNumber` we're at (this fractional part is\n\t// what we rounded up before).\n\n\treturn\n}", "func (o *OrderedChildren) Populate(children map[string]Inode) uint32 {\n\tvar links uint32\n\tfor name, child := range children {\n\t\tif child.Mode().IsDir() {\n\t\t\tlinks++\n\t\t}\n\t\tif err := o.insert(name, child, true); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Collision when attempting to insert child %q (%+v)\", name, child))\n\t\t}\n\t}\n\treturn links\n}", "func WithChildren() LogReadOption { return LogReadOption{withChildren: true} }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
New returns a new instance of an echo HTTP server
func New() Server { return &echoServer{ Instance: echo.New(), } }
[ "func (h *HTTP) New(address string) Server {\n\treturn &HTTP{\n\t\tServer: newRouter(address),\n\t}\n}", "func newServer() *server {\n\ts := &server{\n\t\trouter: echo.New(),\n\t}\n\n\ts.initHTTPServer()\n\ts.routes()\n\n\treturn s\n}", "func New(host, port string, h http.Handler) *WebServer {\n\tvar ws WebServer\n\n\tws.Addr = net.JoinHostPort(host, port)\n\tws.Handler = h\n\n\treturn &ws\n}", "func New() HelloServer {\n\thttp.DefaultServeMux = new(http.ServeMux)\n\treturn HelloServer{\n\t\t&http.Server{\n\t\t\tAddr: \":7100\",\n\t\t},\n\t}\n}", "func New(server *http.Server) (*Server, error) {\n\tlistener, err := zerodown.Listen(\"tcp\", server.Addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Server{\n\t\tserver: server,\n\t\tlistener: listener,\n\t}, nil\n}", "func New(addr string, host app.HostService, collector *metrics.Collector) app.Server {\n\treturn &server{\n\t\tsrv: telnet.Server{Addr: addr, Handler: nil},\n\t\thost: host,\n\t\tcollector: collector,\n\t}\n}", "func NewHttp() *Http {\n return &Http{}\n}", "func NewHTTP(port uint16, pachClientFactory func(ctx context.Context) *client.APIClient) *HTTP {\n\tmux := http.NewServeMux()\n\thandler := &Server{\n\t\tpachClientFactory: pachClientFactory,\n\t}\n\tmux.Handle(\"/archive/\", CSRFWrapper(handler))\n\tmux.Handle(\"/healthz\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"healthy\\n\")) //nolint:errcheck\n\t}))\n\treturn &HTTP{\n\t\tmux: mux,\n\t\tserver: &http.Server{\n\t\t\tAddr: fmt.Sprintf(\":%d\", port),\n\t\t\tHandler: mux,\n\t\t},\n\t}\n}", "func New(info Info) *WebServer {\n\trouter := bone.New()\n\t// Add more to this later on\n\treturn &WebServer{info.Listen + \":\" + info.Port, router}\n}", "func NewServer() *Server {}", "func NewHTTP(s Service) *HTTP { return &HTTP{s} }", "func NewHTTP(host string, port int) Static {\n\treturn Static{\n\t\tprotocol: ProtocolHTTP,\n\t\thost: host,\n\t\tport: port,\n\t}\n}", 
"func newHTTPServer(appConfig config.AppConfig, logger services.Logger) services.HTTPServer {\n\treturn services.NewDefaultHTTPServer(appConfig.Port, logger)\n}", "func New(c Config) *http.Server {\n\n\thandler := &RateLimitHandler{\n\t\trecords: map[string]*Record{},\n\t\tlimit: c.Limit,\n\t\twindow: c.Window,\n\t}\n\n\ts := &http.Server{\n\t\tAddr: fmt.Sprintf(\"0.0.0.0:%d\", c.Port),\n\t\tHandler: handler,\n\t}\n\treturn s\n}", "func New(host string) *service {\n\treturn &service{\n\t\thost: host,\n\t\tmux: http.NewServeMux(),\n\t}\n}", "func New(args []string) *Server {\n\treturn &Server{\n\t\targs: args,\n\t\twatchStopCh: make(chan os.Signal, 1),\n\t}\n}", "func New(addr string) (*Server, error) {\n\ts := &Server{\n\t\taddr: addr,\n\t\tshutdownTimeout: time.Minute,\n\t\tSessions: make(chan *Session),\n\t}\n\ts.hs = &http.Server{Handler: s}\n\tln, err := net.Listen(\"tcp\", s.addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.ln = ln\n\ts.ListeningAddr = fmt.Sprintf(\":%d\", s.ln.Addr().(*net.TCPAddr).Port)\n\treturn s, nil\n}", "func newHTTPServer(cfg serverHTTPConfig, opts ...option) *httpServer {\n\topt := newOptions(opts...)\n\n\tsrv := &httpServer{\n\t\tcfg: cfg,\n\t}\n\n\topt.Logger = opt.Logger.With(zap.String(\"server\", srv.Name()))\n\n\tsrv.opt = opt\n\n\treturn srv\n}", "func New() (s *ZServer) {\n\ts = &ZServer{\n\t\tserver: &http.Server{\n\t\t\tAddr: \":8080\",\n\t\t},\n\t}\n\ts.init()\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewPodmanDriver returns a new DriverPlugin implementation
func NewPodmanDriver(logger hclog.Logger) drivers.DriverPlugin { ctx, cancel := context.WithCancel(context.Background()) return &Driver{ eventer: eventer.NewEventer(ctx, logger), config: &PluginConfig{}, tasks: newTaskStore(), ctx: ctx, signalShutdown: cancel, logger: logger.Named(pluginName), } }
[ "func NewPodmanDriver(logger hclog.Logger) drivers.DriverPlugin {\n\tctx, cancel := context.WithCancel(context.Background())\n\tlogger = logger.Named(pluginName)\n\treturn &Driver{\n\t\teventer: eventer.NewEventer(ctx, logger),\n\t\tconfig: &Config{},\n\t\ttasks: newTaskStore(),\n\t\tctx: ctx,\n\t\tsignalShutdown: cancel,\n\t\tlogger: logger,\n\t}\n}", "func newDevicePlugin(mode string, rootPath string) (*devicePlugin, error) {\n\tvar (\n\t\tdp *devicePlugin\n\t\terr error\n\t)\n\n\tsysfsPathOPAE := path.Join(rootPath, sysfsDirectoryOPAE)\n\tdevfsPath := path.Join(rootPath, devfsDirectory)\n\n\tif _, err = os.Stat(sysfsPathOPAE); os.IsNotExist(err) {\n\t\tsysfsPathDFL := path.Join(rootPath, sysfsDirectoryDFL)\n\t\tif _, err = os.Stat(sysfsPathDFL); os.IsNotExist(err) {\n\t\t\treturn nil, errors.Errorf(\"kernel driver is not loaded: neither %s nor %s sysfs entry exists\", sysfsPathOPAE, sysfsPathDFL)\n\t\t}\n\n\t\tdp, err = newDevicePluginDFL(sysfsPathDFL, devfsPath, mode)\n\t} else {\n\t\tdp, err = newDevicePluginOPAE(sysfsPathOPAE, devfsPath, mode)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdp.newPort = fpga.NewPort\n\tdp.scanTicker = time.NewTicker(scanPeriod)\n\tdp.scanDone = make(chan bool, 1) // buffered as we may send to it before Scan starts receiving from it\n\n\treturn dp, nil\n}", "func NewPluginManager(config *spec.Config) (manager.Interface, error) {\n\tvar err error\n\tswitch *config.Flags.MigStrategy {\n\tcase spec.MigStrategyNone:\n\tcase spec.MigStrategySingle:\n\tcase spec.MigStrategyMixed:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown strategy: %v\", *config.Flags.MigStrategy)\n\t}\n\n\tnvmllib := nvml.New()\n\n\tdeviceListStrategies, err := spec.NewDeviceListStrategies(*config.Flags.Plugin.DeviceListStrategy)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid device list strategy: %v\", err)\n\t}\n\n\tcdiEnabled := deviceListStrategies.IsCDIEnabled()\n\n\tcdiHandler, err := 
cdi.New(\n\t\tcdi.WithEnabled(cdiEnabled),\n\t\tcdi.WithDriverRoot(*config.Flags.Plugin.ContainerDriverRoot),\n\t\tcdi.WithTargetDriverRoot(*config.Flags.NvidiaDriverRoot),\n\t\tcdi.WithNvidiaCTKPath(*config.Flags.Plugin.NvidiaCTKPath),\n\t\tcdi.WithNvml(nvmllib),\n\t\tcdi.WithDeviceIDStrategy(*config.Flags.Plugin.DeviceIDStrategy),\n\t\tcdi.WithVendor(\"k8s.device-plugin.nvidia.com\"),\n\t\tcdi.WithGdsEnabled(*config.Flags.GDSEnabled),\n\t\tcdi.WithMofedEnabled(*config.Flags.MOFEDEnabled),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create cdi handler: %v\", err)\n\t}\n\n\tm, err := manager.New(\n\t\tmanager.WithNVML(nvmllib),\n\t\tmanager.WithCDIEnabled(cdiEnabled),\n\t\tmanager.WithCDIHandler(cdiHandler),\n\t\tmanager.WithConfig(config),\n\t\tmanager.WithFailOnInitError(*config.Flags.FailOnInitError),\n\t\tmanager.WithMigStrategy(*config.Flags.MigStrategy),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create plugin manager: %v\", err)\n\t}\n\n\tif err := m.CreateCDISpecFile(); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create cdi spec file: %v\", err)\n\t}\n\n\treturn m, nil\n}", "func (pci PCILister) NewPlugin(vendorID string) dpm.PluginInterface {\n\tglog.V(3).Infof(\"Creating device plugin %s\", vendorID)\n\ts := strings.Split(vendorID, \"_\")\n\treturn &VFIODevicePlugin{\n\t\tvendorID: s[0],\n\t\tdeviceID: s[1],\n\t}\n}", "func newPluginProvider(pluginBinDir string, provider kubeletconfig.CredentialProvider) (*pluginProvider, error) {\n\tmediaType := \"application/json\"\n\tinfo, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unsupported media type %q\", mediaType)\n\t}\n\n\tgv, ok := apiVersions[provider.APIVersion]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid apiVersion: %q\", provider.APIVersion)\n\t}\n\n\tclock := clock.RealClock{}\n\n\treturn &pluginProvider{\n\t\tclock: clock,\n\t\tmatchImages: 
provider.MatchImages,\n\t\tcache: cache.NewExpirationStore(cacheKeyFunc, &cacheExpirationPolicy{clock: clock}),\n\t\tdefaultCacheDuration: provider.DefaultCacheDuration.Duration,\n\t\tlastCachePurge: clock.Now(),\n\t\tplugin: &execPlugin{\n\t\t\tname: provider.Name,\n\t\t\tapiVersion: provider.APIVersion,\n\t\t\tencoder: codecs.EncoderForVersion(info.Serializer, gv),\n\t\t\tpluginBinDir: pluginBinDir,\n\t\t\targs: provider.Args,\n\t\t\tenvVars: provider.Env,\n\t\t\tenviron: os.Environ,\n\t\t},\n\t}, nil\n}", "func NewPlugin(opts ...Option) *Plugin {\n\tp := &Plugin{}\n\n\tp.SetName(\"generator\")\n\tp.KVStore = &etcd.DefaultPlugin\n\tp.KVScheduler = &kvscheduler.DefaultPlugin\n\n\tfor _, o := range opts {\n\t\to(p)\n\t}\n\n\tp.Setup()\n\n\treturn p\n}", "func newPlugin() (p *slackscot.Plugin) {\n\tp = new(slackscot.Plugin)\n\tp.Name = \"tester\"\n\tp.Commands = []slackscot.ActionDefinition{{\n\t\tMatch: func(m *slackscot.IncomingMessage) bool {\n\t\t\treturn strings.HasPrefix(m.NormalizedText, \"make\")\n\t\t},\n\t\tUsage: \"make `<something>`\",\n\t\tDescription: \"Have the test bot make something for you\",\n\t\tAnswer: func(m *slackscot.IncomingMessage) *slackscot.Answer {\n\t\t\treturn &slackscot.Answer{Text: \"Ready\"}\n\t\t},\n\t}}\n\n\treturn p\n}", "func NewPlugin(proto, path string, params ...string) *Plugin {\n\tif proto != \"unix\" && proto != \"tcp\" {\n\t\tpanic(\"Invalid protocol. 
Specify 'unix' or 'tcp'.\")\n\t}\n\tp := &Plugin{\n\t\texe: path,\n\t\tproto: proto,\n\t\tparams: params,\n\t\tinitTimeout: 2 * time.Second,\n\t\texitTimeout: 2 * time.Second,\n\t\thandler: NewDefaultErrorHandler(),\n\t\tmeta: meta(\"pingo\" + randstr(5)),\n\t\tobjsCh: make(chan *objects),\n\t\tconnCh: make(chan *conn),\n\t\tkillCh: make(chan *waiter),\n\t\texitCh: make(chan struct{}),\n\t}\n\treturn p\n}", "func newDriver() *driver {\n\treturn &driver{\n\t\tnetworks: map[string]*bridgeNetwork{},\n\t\tportAllocator: portallocator.Get(),\n\t}\n}", "func newPluginContainer() PluginContainer {\n\treturn new(pluginContainer)\n}", "func NewPluginCommand(cmd *cobra.Command, dockerCli *client.DockerCli) {\n}", "func NewPlugin(config *common.PluginConfig) (NetPlugin, error) {\n\t// Setup base plugin.\n\tplugin, err := cnm.NewPlugin(name, config.Version, endpointType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnl := netlink.NewNetlink()\n\t// Setup network manager.\n\tnm, err := network.NewNetworkManager(nl, platform.NewExecClient(), &netio.NetIO{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.NetApi = nm\n\n\treturn &netPlugin{\n\t\tPlugin: plugin,\n\t\tscope: scope,\n\t\tnm: nm,\n\t}, nil\n}", "func NewDriver() godfish.Driver { return &driver{} }", "func (p *PodmanTestIntegration) Podman(args []string) *PodmanSessionIntegration {\n\tpodmanSession := p.PodmanBase(args, false, false)\n\treturn &PodmanSessionIntegration{podmanSession}\n}", "func NewPlugin() container.Plugin {\n\treturn &plugin{}\n}", "func (*manager) PluginType() string { return base.PluginTypeDriver }", "func newPodManager(kClient kubernetes.Interface, policy osdnPolicy, overlayMTU uint32, routableMTU uint32, ovs *ovsController) *podManager {\n\tpm := newDefaultPodManager()\n\tpm.kClient = kClient\n\tpm.policy = policy\n\tpm.overlayMTU = overlayMTU\n\tpm.routableMTU = routableMTU\n\tpm.podHandler = pm\n\tpm.ovs = ovs\n\treturn pm\n}", "func NewPlugin() (shared.Plugin, error) {\n\treturn 
instance, nil\n}", "func Plugin(replayLayout *device.MemoryLayout) compiler.Plugin {\n\treturn &replayer{replayLayout: replayLayout}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PluginInfo returns metadata about the podman driver plugin
func (d *Driver) PluginInfo() (*base.PluginInfoResponse, error) { return pluginInfo, nil }
[ "func ParlayPluginInfo() string {\n\treturn pluginInfo\n}", "func PluginInfo() map[string]string {\n\treturn map[string]string{\n\t\t\"pluginAPIVersion\": \"0.1.0\",\n\t\t\"type\": \"connector.tcp\",\n\t\t\"id\": \"example-tcp-connector\",\n\t\t\"description\": \"Example TCP Connector Plugin\",\n\t}\n}", "func (m *MyPlugin) Info() *plugins.Info {\n\treturn &plugins.Info{\n\t\tID: 999,\n\t\tName: \"extractor-example\",\n\t\tDescription: \"An Extractor Plugin Example\",\n\t\tContact: \"github.com/falcosecurity/plugin-sdk-go/\",\n\t\tVersion: \"0.1.0\",\n\t\tRequiredAPIVersion: \"0.2.0\",\n\t\tExtractEventSources: []string{\"example\"},\n\t}\n}", "func (m *Manager) PluginInfo(modulePath string) compat.Info {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\tif p, ok := m.plugins[modulePath]; ok {\n\t\treturn p.PluginInfo()\n\t}\n\tfmt.Println(\"Could not get plugin info for\", modulePath)\n\treturn compat.Info{\n\t\tName: \"UNKNOWN\",\n\t\tModulePath: modulePath,\n\t\tDescription: \"Oops something went wrong\",\n\t}\n}", "func getPluginInfo(ctx context.Context, plugin string) (*pluginsAPIPlugin, error) {\n\turl := fmt.Sprintf(pluginAPIURL, plugin)\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewRequest(GET %q): %w\", url, err)\n\t}\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Client.Do(GET %q): %w\", url, err)\n\t}\n\tdefer res.Body.Close()\n\tswitch res.StatusCode {\n\tcase 200:\n\tcase 404:\n\t\treturn nil, fmt.Errorf(\"plugin not found\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected response %v\", res.Status)\n\t}\n\n\tpi := pluginsAPIPlugin{}\n\terr = json.NewDecoder(res.Body).Decode(&pi)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Decode(%q): %w\", url, err)\n\t}\n\treturn &pi, nil\n}", "func (k *xyzProvider) GetPluginInfo(context.Context, *pbempty.Empty) (*pulumirpc.PluginInfo, error) {\n\treturn &pulumirpc.PluginInfo{\n\t\tVersion: 
k.version,\n\t}, nil\n}", "func (p *componentProvider) GetPluginInfo(context.Context, *pbempty.Empty) (*pulumirpc.PluginInfo, error) {\n\treturn &pulumirpc.PluginInfo{\n\t\tVersion: p.version,\n\t}, nil\n}", "func GetGotifyPluginInfo() plugin.Info {\n\treturn plugin.Info{\n\t\tModulePath: \"github.com/gotify/plugin-template\",\n\t\tVersion: \"1.0.0\",\n\t\tAuthor: \"Your Name\",\n\t\tWebsite: \"https://gotify.net/docs/plugin\",\n\t\tDescription: \"An example plugin with travis-ci building\",\n\t\tLicense: \"MIT\",\n\t\tName: \"gotify/plugin-template\",\n\t}\n}", "func (a *Auth) generatePluginInfo(pluginURL string) (PluginInfo, error) {\n\tpluginID, err := generateRandomString(32)\n\tif err != nil {\n\t\treturn PluginInfo{}, errors.New(\"system error: could not generate plugin ID\")\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{\n\t\t\"url\": pluginURL,\n\t\t\"id\": pluginID,\n\t})\n\tapiToken, err := t.SignedString(a.privKey)\n\tif err != nil {\n\t\treturn PluginInfo{}, err\n\t}\n\n\ta.logger.Debug(\"Generated new plugin auth\", zap.String(\"url\", pluginURL), zap.String(\"id\", pluginID))\n\n\treturn PluginInfo{\n\t\tPluginID: pluginID,\n\t\tPluginURL: pluginURL,\n\t\tAPIToken: apiToken,\n\t}, nil\n}", "func (*manager) PluginType() string { return base.PluginTypeDevice }", "func (*manager) PluginType() string { return base.PluginTypeDriver }", "func (*PluginInfoResponse) Descriptor() ([]byte, []int) {\n\treturn file_odpf_optimus_plugins_base_proto_rawDescGZIP(), []int{1}\n}", "func (c *DlaasPlugin) GetMetadata() plugin.PluginMetadata {\n\n\tcommands := make([]plugin.Command, len(metadata.Commands))\n\n\tfor index, command := range metadata.Commands {\n\t\tcommands[index] = plugin.Command{\n\t\t\tNamespace: command.Namespace,\n\t\t\tName: command.Name,\n\t\t\tDescription: command.Description,\n\t\t\tUsage: command.Usage,\n\t\t\tFlags: command.PluginFlags,\n\t\t}\n\t}\n\n\treturn plugin.PluginMetadata{\n\t\tName: pluginName,\n\t\tVersion: 
plugin.VersionType{\n\t\t\tMajor: 1,\n\t\t\tMinor: 0,\n\t\t\tBuild: 0,\n\t\t},\n\t\tMinCliVersion: plugin.VersionType{\n\t\t\tMajor: 0,\n\t\t\tMinor: 4,\n\t\t\tBuild: 0,\n\t\t},\n\t\tNamespaces: []plugin.Namespace{\n\t\t\t{\n\t\t\t\tName: dlNamespace,\n\t\t\t\tDescription: \"Manage deep learning models on Bluemix\",\n\t\t\t},\n\t\t},\n\t\tCommands: commands,\n\t}\n}", "func (p *plugin) VendorInfo() *spi.VendorInfo {\n\treturn &spi.VendorInfo{\n\t\tInterfaceSpec: spi.InterfaceSpec{\n\t\t\tName: \"infrakit-instance-vSphere\",\n\t\t\tVersion: \"0.6.0\",\n\t\t},\n\t\tURL: \"https://github.com/docker/infrakit\",\n\t}\n}", "func (p *plugin) VendorInfo() *spi.VendorInfo {\n\treturn &spi.VendorInfo{\n\t\tInterfaceSpec: spi.InterfaceSpec{\n\t\t\tName: \"infrakit-instance-sl\",\n\t\t\tVersion: \"0.3.0\",\n\t\t},\n\t\tURL: \"https://github.com/HerrHuber/softlayer_instance_plugin\",\n\t}\n}", "func (d *PluginListDescriber) Describe(ctx context.Context, namespace string, options describer.Options) (component.ContentResponse, error) {\n\tpluginStore := options.PluginManager().Store()\n\n\tlist := component.NewList(\"Plugins\", nil)\n\ttableCols := component.NewTableCols(\"Name\", \"Description\", \"Capabilities\")\n\ttbl := component.NewTable(\"Plugins\", \"There are no plugins!\", tableCols)\n\tlist.Add(tbl)\n\n\tfor _, n := range pluginStore.ClientNames() {\n\t\tmetadata, err := pluginStore.GetMetadata(n)\n\t\tif err != nil {\n\t\t\treturn component.EmptyContentResponse, errors.New(\"metadata is nil\")\n\t\t}\n\n\t\tvar summaryItems []string\n\t\tif metadata.Capabilities.IsModule {\n\t\t\tsummaryItems = append(summaryItems, \"Module\")\n\t\t}\n\n\t\tif actionNames := metadata.Capabilities.ActionNames; len(actionNames) > 0 {\n\t\t\tsummaryItems = append(summaryItems, fmt.Sprintf(\"Actions: %s\",\n\t\t\t\tstrings.Join(actionNames, \", \")))\n\t\t}\n\n\t\tin := []struct {\n\t\t\tname string\n\t\t\tlist []schema.GroupVersionKind\n\t\t}{\n\t\t\t{name: \"Object Status\", list: 
metadata.Capabilities.SupportsObjectStatus},\n\t\t\t{name: \"Printer Config\", list: metadata.Capabilities.SupportsPrinterConfig},\n\t\t\t{name: \"Printer Items\", list: metadata.Capabilities.SupportsPrinterItems},\n\t\t\t{name: \"Printer Status\", list: metadata.Capabilities.SupportsPrinterStatus},\n\t\t\t{name: \"Tab\", list: metadata.Capabilities.SupportsTab},\n\t\t}\n\n\t\tfor _, item := range in {\n\t\t\tsupport, ok := summarizeSupports(item.name, item.list)\n\t\t\tif ok {\n\t\t\t\tsummaryItems = append(summaryItems, support)\n\t\t\t}\n\t\t}\n\n\t\tvar sb strings.Builder\n\t\tfor i := range summaryItems {\n\t\t\tsb.WriteString(fmt.Sprintf(\"[%s]\", summaryItems[i]))\n\t\t\tif i < len(summaryItems)-1 {\n\t\t\t\tsb.WriteString(\", \")\n\t\t\t}\n\t\t}\n\n\t\trow := component.TableRow{\n\t\t\t\"Name\": component.NewText(metadata.Name),\n\t\t\t\"Description\": component.NewText(metadata.Description),\n\t\t\t\"Capabilities\": component.NewText(sb.String()),\n\t\t}\n\t\ttbl.Add(row)\n\t}\n\n\ttbl.Sort(\"Name\", false)\n\n\treturn component.ContentResponse{\n\t\tComponents: []component.Component{list},\n\t}, nil\n}", "func DescribePlugins() string {\n\tpl := ListPlugins()\n\n\tstr := \"Server types:\\n\"\n\tfor _, name := range pl[\"server_types\"] {\n\t\tstr += \" \" + name + \"\\n\"\n\t}\n\n\tstr += \"\\nCaddyfile loaders:\\n\"\n\tfor _, name := range pl[\"caddyfile_loaders\"] {\n\t\tstr += \" \" + name + \"\\n\"\n\t}\n\n\tif len(pl[\"event_hooks\"]) > 0 {\n\t\tstr += \"\\nEvent hook plugins:\\n\"\n\t\tfor _, name := range pl[\"event_hooks\"] {\n\t\t\tstr += \" hook.\" + name + \"\\n\"\n\t\t}\n\t}\n\n\tif len(pl[\"clustering\"]) > 0 {\n\t\tstr += \"\\nClustering plugins:\\n\"\n\t\tfor _, name := range pl[\"clustering\"] {\n\t\t\tstr += \" \" + name + \"\\n\"\n\t\t}\n\t}\n\n\tstr += \"\\nOther plugins:\\n\"\n\tfor _, name := range pl[\"others\"] {\n\t\tstr += \" \" + name + \"\\n\"\n\t}\n\n\treturn str\n}", "func (q *QueryResolver) RetentionPluginInfo(ctx 
context.Context, args retentionPluginInfoArgs) (*PluginInfoResolver, error) {\n\tresp, err := q.Env.PluginServer.GetRetentionPluginInfo(ctx, &cloudpb.GetRetentionPluginInfoRequest{\n\t\tPluginId: args.ID,\n\t\tVersion: args.PluginVersion,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigs := make([]PluginConfigResolver, 0)\n\n\tfor k, v := range resp.Configs {\n\t\tconfigs = append(configs, PluginConfigResolver{\n\t\t\tName: k,\n\t\t\tDescription: v,\n\t\t})\n\t}\n\n\treturn &PluginInfoResolver{\n\t\tConfigs: configs,\n\t\tAllowCustomExportURL: resp.AllowCustomExportURL,\n\t\tAllowInsecureTLS: resp.AllowInsecureTLS,\n\t\tDefaultExportURL: resp.DefaultExportURL,\n\t}, nil\n}", "func (c *ClientWithResponses) GetKubectlPluginDetailsWithResponse(ctx context.Context, repoName RepoNameParam, packageName PackageNameParam, reqEditors ...RequestEditorFn) (*GetKubectlPluginDetailsResponse, error) {\n\trsp, err := c.GetKubectlPluginDetails(ctx, repoName, packageName, reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseGetKubectlPluginDetailsResponse(rsp)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ConfigSchema function allows a plugin to tell Nomad the schema for its configuration. This configuration is given in a plugin block of the client configuration. The schema is defined with the hclspec package.
func (d *Driver) ConfigSchema() (*hclspec.Spec, error) { return configSpec, nil }
[ "func (d *NvidiaDevice) ConfigSchema() (*hclspec.Spec, error) {\n\treturn configSpec, nil\n}", "func hookConfigurationSchema() *schema.Schema {\n\treturn &schema.Schema{\n\t\tType: schema.TypeList,\n\t\tOptional: true,\n\t\tMaxItems: 1,\n\t\tElem: &schema.Resource{\n\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\"invocation_condition\": func() *schema.Schema {\n\t\t\t\t\tschema := documentAttributeConditionSchema()\n\t\t\t\t\treturn schema\n\t\t\t\t}(),\n\t\t\t\t\"lambda_arn\": {\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tRequired: true,\n\t\t\t\t\tValidateFunc: verify.ValidARN,\n\t\t\t\t},\n\t\t\t\t\"s3_bucket\": {\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tRequired: true,\n\t\t\t\t\tValidateFunc: validation.All(\n\t\t\t\t\t\tvalidation.StringLenBetween(3, 63),\n\t\t\t\t\t\tvalidation.StringMatch(\n\t\t\t\t\t\t\tregexp.MustCompile(`[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]`),\n\t\t\t\t\t\t\t\"Must be a valid bucket name\",\n\t\t\t\t\t\t),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func tagMakeConfigSchema(tagAttrName schemaAttr) *schema.Schema {\n\treturn &schema.Schema{\n\t\tType: schema.TypeSet,\n\t\tOptional: true,\n\t\tElem: &schema.Schema{\n\t\t\tType: schema.TypeString,\n\t\t\tValidateFunc: validateTag,\n\t\t},\n\t}\n}", "func ImageConfigSchema() *gojsonschema.Schema {\n\treturn loadSchema(\"image-config.schema.json\")\n}", "func BackendSchema(factoryName string) (*config.ConfigurationSchema, error) {\n\tif backendRegistry[factoryName] == nil {\n\t\treturn nil, fmt.Errorf(\"The adapter %s is not registered Processor cannot be created\", factoryName)\n\t}\n\treturn backendRegistry[factoryName].configurationSchema, nil\n}", "func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {\n\tlogger := hclog.New(&hclog.LoggerOptions{\n\t\tName: \"plugin\",\n\t\tLevel: hclog.Trace,\n\t\tOutput: os.Stderr,\n\t})\n\n\treturn &plugin.ClientConfig{\n\t\tCmd: exec.Command(m.Path),\n\t\tHandshakeConfig: Handshake,\n\t\tVersionedPlugins: 
VersionedPlugins,\n\t\tManaged: true,\n\t\tLogger: logger,\n\t\tAllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},\n\t\tAutoMTLS: true,\n\t}\n}", "func (o TableExternalDataConfigurationOutput) Schema() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TableExternalDataConfiguration) *string { return v.Schema }).(pulumi.StringPtrOutput)\n}", "func GetClusterConfigSchema(extensionSchema string) string {\n\tvar clusterConfigSchema string\n\tif clusterConfigSchema == \"\" {\n\t\tclusterConfigSchema = fmt.Sprintf(ClusterConfigSpecSchemaTemplate, \"\")\n\t} else {\n\t\tclusterConfigSchema = fmt.Sprintf(ClusterConfigSpecSchemaTemplate, \",\"+extensionSchema)\n\t}\n\treturn fmt.Sprintf(V2SchemaTemplate, MetadataSchema, clusterConfigSchema, DefaultDefinitions)\n}", "func (p *hostingdeProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) {\n\tresp.Schema = schema.Schema{\n\t\tAttributes: map[string]schema.Attribute{\n\t\t\t\"account_id\": schema.StringAttribute{\n\t\t\t\tDescription: \"Account ID for hosting.de API. May also be provided via HOSTINGDE_ACCOUNT_ID environment variable.\",\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"auth_token\": schema.StringAttribute{\n\t\t\t\tDescription: \"Auth token for hosting.de API. 
May also be provided via HOSTINGDE_AUTH_TOKEN environment variable.\",\n\t\t\t\tOptional: true,\n\t\t\t\tSensitive: true,\n\t\t\t},\n\t\t},\n\t}\n}", "func GenSchemaClusterNetworkingConfigV2(ctx context.Context) (github_com_hashicorp_terraform_plugin_framework_tfsdk.Schema, github_com_hashicorp_terraform_plugin_framework_diag.Diagnostics) {\n\treturn github_com_hashicorp_terraform_plugin_framework_tfsdk.Schema{Attributes: map[string]github_com_hashicorp_terraform_plugin_framework_tfsdk.Attribute{\n\t\t\"id\": {\n\t\t\tComputed: true,\n\t\t\tOptional: false,\n\t\t\tPlanModifiers: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributePlanModifier{github_com_hashicorp_terraform_plugin_framework_tfsdk.UseStateForUnknown()},\n\t\t\tRequired: false,\n\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t},\n\t\t\"kind\": {\n\t\t\tComputed: true,\n\t\t\tDescription: \"Kind is a resource kind\",\n\t\t\tOptional: true,\n\t\t\tPlanModifiers: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributePlanModifier{github_com_hashicorp_terraform_plugin_framework_tfsdk.UseStateForUnknown()},\n\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t},\n\t\t\"metadata\": {\n\t\t\tAttributes: github_com_hashicorp_terraform_plugin_framework_tfsdk.SingleNestedAttributes(map[string]github_com_hashicorp_terraform_plugin_framework_tfsdk.Attribute{\n\t\t\t\t\"description\": {\n\t\t\t\t\tDescription: \"Description is object description\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t\t\t},\n\t\t\t\t\"expires\": {\n\t\t\t\t\tDescription: \"Expires is a global expiry time header can be set on any resource in the system.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: UseRFC3339Time(),\n\t\t\t\t\tValidators: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributeValidator{MustTimeBeInFuture()},\n\t\t\t\t},\n\t\t\t\t\"labels\": 
{\n\t\t\t\t\tDescription: \"Labels is a set of labels\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.MapType{ElemType: github_com_hashicorp_terraform_plugin_framework_types.StringType},\n\t\t\t\t\tValidators: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributeValidator{UseMapKeysPresentValidator(\"teleport.dev/origin\")},\n\t\t\t\t},\n\t\t\t\t\"namespace\": {\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"Namespace is object namespace. The field should be called \\\"namespace\\\" when it returns in Teleport 2.4.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tPlanModifiers: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributePlanModifier{github_com_hashicorp_terraform_plugin_framework_tfsdk.UseStateForUnknown()},\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t\t\t},\n\t\t\t}),\n\t\t\tDescription: \"Metadata is resource metadata\",\n\t\t\tOptional: true,\n\t\t},\n\t\t\"spec\": {\n\t\t\tAttributes: github_com_hashicorp_terraform_plugin_framework_tfsdk.SingleNestedAttributes(map[string]github_com_hashicorp_terraform_plugin_framework_tfsdk.Attribute{\n\t\t\t\t\"assist_command_execution_workers\": {\n\t\t\t\t\tDescription: \"AssistCommandExecutionWorkers determines the number of workers that will execute arbitrary Assist commands on servers in parallel\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.Int64Type,\n\t\t\t\t},\n\t\t\t\t\"client_idle_timeout\": {\n\t\t\t\t\tDescription: \"ClientIdleTimeout sets global cluster default setting for client idle timeouts.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: DurationType{},\n\t\t\t\t},\n\t\t\t\t\"idle_timeout_message\": {\n\t\t\t\t\tDescription: \"ClientIdleTimeoutMessage is the message sent to the user when a connection times out.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: 
github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t\t\t},\n\t\t\t\t\"keep_alive_count_max\": {\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"KeepAliveCountMax is the number of keep-alive messages that can be missed before the server disconnects the connection to the client.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tPlanModifiers: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributePlanModifier{github_com_hashicorp_terraform_plugin_framework_tfsdk.UseStateForUnknown()},\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.Int64Type,\n\t\t\t\t},\n\t\t\t\t\"keep_alive_interval\": {\n\t\t\t\t\tComputed: true,\n\t\t\t\t\tDescription: \"KeepAliveInterval is the interval at which the server sends keep-alive messages to the client.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tPlanModifiers: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributePlanModifier{github_com_hashicorp_terraform_plugin_framework_tfsdk.UseStateForUnknown()},\n\t\t\t\t\tType: DurationType{},\n\t\t\t\t},\n\t\t\t\t\"proxy_listener_mode\": {\n\t\t\t\t\tDescription: \"ProxyListenerMode is proxy listener mode used by Teleport Proxies.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.Int64Type,\n\t\t\t\t},\n\t\t\t\t\"proxy_ping_interval\": {\n\t\t\t\t\tDescription: \"ProxyPingInterval defines in which interval the TLS routing ping message should be sent. 
This is applicable only when using ping-wrapped connections, regular TLS routing connections are not affected.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: DurationType{},\n\t\t\t\t},\n\t\t\t\t\"routing_strategy\": {\n\t\t\t\t\tDescription: \"RoutingStrategy determines the strategy used to route to nodes.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.Int64Type,\n\t\t\t\t},\n\t\t\t\t\"session_control_timeout\": {\n\t\t\t\t\tDescription: \"SessionControlTimeout is the session control lease expiry and defines the upper limit of how long a node may be out of contact with the auth server before it begins terminating controlled sessions.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: DurationType{},\n\t\t\t\t},\n\t\t\t\t\"tunnel_strategy\": {\n\t\t\t\t\tAttributes: github_com_hashicorp_terraform_plugin_framework_tfsdk.SingleNestedAttributes(map[string]github_com_hashicorp_terraform_plugin_framework_tfsdk.Attribute{\n\t\t\t\t\t\t\"agent_mesh\": {\n\t\t\t\t\t\t\tAttributes: github_com_hashicorp_terraform_plugin_framework_tfsdk.SingleNestedAttributes(map[string]github_com_hashicorp_terraform_plugin_framework_tfsdk.Attribute{\"active\": {\n\t\t\t\t\t\t\t\tComputed: true,\n\t\t\t\t\t\t\t\tDescription: \"Automatically generated field preventing empty message errors\",\n\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.BoolType,\n\t\t\t\t\t\t\t}}),\n\t\t\t\t\t\t\tDescription: \"\",\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"proxy_peering\": {\n\t\t\t\t\t\t\tAttributes: github_com_hashicorp_terraform_plugin_framework_tfsdk.SingleNestedAttributes(map[string]github_com_hashicorp_terraform_plugin_framework_tfsdk.Attribute{\"agent_connection_count\": {\n\t\t\t\t\t\t\t\tDescription: \"\",\n\t\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t\t\tType: 
github_com_hashicorp_terraform_plugin_framework_types.Int64Type,\n\t\t\t\t\t\t\t}}),\n\t\t\t\t\t\t\tDescription: \"\",\n\t\t\t\t\t\t\tOptional: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t\tDescription: \"TunnelStrategyV1 determines the tunnel strategy used in the cluster.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t},\n\t\t\t\t\"web_idle_timeout\": {\n\t\t\t\t\tDescription: \"WebIdleTimeout sets global cluster default setting for the web UI idle timeouts.\",\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tType: DurationType{},\n\t\t\t\t},\n\t\t\t}),\n\t\t\tDescription: \"Spec is a ClusterNetworkingConfig specification\",\n\t\t\tOptional: true,\n\t\t},\n\t\t\"sub_kind\": {\n\t\t\tDescription: \"SubKind is an optional resource sub kind, used in some resources\",\n\t\t\tOptional: true,\n\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t},\n\t\t\"version\": {\n\t\t\tComputed: true,\n\t\t\tDescription: \"Version is a resource version\",\n\t\t\tOptional: true,\n\t\t\tPlanModifiers: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributePlanModifier{github_com_hashicorp_terraform_plugin_framework_tfsdk.UseStateForUnknown()},\n\t\t\tType: github_com_hashicorp_terraform_plugin_framework_types.StringType,\n\t\t\tValidators: []github_com_hashicorp_terraform_plugin_framework_tfsdk.AttributeValidator{UseVersionBetween(2, 2)},\n\t\t},\n\t}}, nil\n}", "func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {\n\treturn taskConfigSpec, nil\n}", "func convertPolicyConfigSchema(schema *plugin.AnalyzerPolicyConfigSchema) (*apitype.PolicyConfigSchema, error) {\n\tif schema == nil {\n\t\treturn nil, nil\n\t}\n\tproperties := map[string]*json.RawMessage{}\n\tfor k, v := range schema.Properties {\n\t\tbytes, err := json.Marshal(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\traw := json.RawMessage(bytes)\n\t\tproperties[k] = &raw\n\t}\n\treturn &apitype.PolicyConfigSchema{\n\t\tType: apitype.Object,\n\t\tProperties: properties,\n\t\tRequired: 
schema.Required,\n\t}, nil\n}", "func (r PluginExecutor) MetadataSchema() json.RawMessage {\n\treturn r.metadataSchema\n}", "func autopilotConfigTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"autopilot-config\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: \"id\",\n\t\t\t\tAllowMissing: true,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.ConditionalIndex{\n\t\t\t\t\tConditional: func(obj interface{}) (bool, error) { return true, nil },\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {\n\treturn api.TaskConfigSpec, nil\n}", "func AddConfigSchema(c interface {\n\tAddResource(url string, r io.Reader) error\n}) error {\n\treturn c.AddResource(ConfigSchemaID, bytes.NewBufferString(ConfigSchema))\n}", "func Schema1(c *Config) {\n\tc.schema1 = true\n}", "func setupSchema(cli *cli.Context) error {\n\tparams, err := parseConnectParams(cli)\n\tif err != nil {\n\t\treturn handleErr(schema.NewConfigError(err.Error()))\n\t}\n\tconn, err := newConn(params)\n\tif err != nil {\n\t\treturn handleErr(err)\n\t}\n\tdefer conn.Close()\n\tif err := schema.Setup(cli, conn); err != nil {\n\t\treturn handleErr(err)\n\t}\n\treturn nil\n}", "func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetConfig function is called when starting the plugin for the first time. The Config given has two different configuration fields. The first PluginConfig, is an encoded configuration from the plugin block of the client config. The second, AgentConfig, is the Nomad agent's configuration which is given to all plugins.
func (d *Driver) SetConfig(cfg *base.Config) error { var pluginConfig PluginConfig if len(cfg.PluginConfig) != 0 { if err := base.MsgPackDecode(cfg.PluginConfig, &pluginConfig); err != nil { return err } } d.config = &pluginConfig if cfg.AgentConfig != nil { d.nomadConfig = cfg.AgentConfig.Driver } clientConfig := api.DefaultClientConfig() if pluginConfig.SocketPath != "" { clientConfig.SocketPath = pluginConfig.SocketPath } d.podman = api.NewClient(d.logger, clientConfig) return nil }
[ "func (d *Driver) SetConfig(cfg *base.Config) error {\n\tvar config Config\n\tif len(cfg.PluginConfig) != 0 {\n\t\tif err := base.MsgPackDecode(cfg.PluginConfig, &config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Save the configuration to the plugin\n\td.config = &config\n\n\t// parse and validated any configuration value if necessary.\n\t//\n\t// If your driver agent configuration requires any complex validation\n\t// (some dependency between attributes) or special data parsing (the\n\t// string \"10s\" into a time.Interval) you can do it here and update the\n\t// value in d.config.\n\t//\n\t// we check if the hypervisor and uri specified by the user is\n\t// supported by the plugin.\n\thypervisor := strings.ToLower(d.config.Hypervisor)\n\turi := strings.ToLower(d.config.Uri)\n\tswitch hypervisor {\n\tcase \"qemu\":\n\t\td.logger.Debug(\"plugin config hypervisor: qemu\")\n\t\t// TODO need more accurate validation, like regexp\n\t\tif !strings.HasPrefix(uri, \"qemu\") {\n\t\t\treturn fmt.Errorf(\"invalid qemu uri %s\", d.config.Uri)\n\t\t}\n\t\td.logger.Debug(\"plugin config qemu uri: \", d.config.Uri)\n\n\t// TODO support other hypervisor\n\t//case \"xen\":\n\t//\td.logger.Debug(\"plugin config hypervisor: xen\")\n\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid hypervisor %s\", d.config.Hypervisor)\n\t}\n\n\tif hypervisor != \"qemu\" && hypervisor != \"lxc\" {\n\t\treturn fmt.Errorf(\"invalid hypervisor %s\", d.config.Hypervisor)\n\t}\n\n\t// Save the Nomad agent configuration\n\tif cfg.AgentConfig != nil {\n\t\td.nomadConfig = cfg.AgentConfig.Driver\n\t}\n\n\t// TODO: initialize any extra requirements if necessary.\n\t//\n\t// Here you can use the config values to initialize any resources that are\n\t// shared by all tasks that use this driver, such as a daemon process.\n\n\treturn nil\n}", "func SetConfig(newConfig *AgentConfig) {\n\tconfig = newConfig\n}", "func SetConfig(c cfg.RPCConfig) {\n\tconfig = c\n}", "func (c *Client) SetConfig(conf 
*ClientConfig) (err error) {\n\tif conf == nil {\n\t\tconf = _defaultCliConf\n\t}\n\tif conf.Dial <= 0 {\n\t\tconf.Dial = time.Second * 10\n\t}\n\tif conf.Timeout <= 0 {\n\t\tconf.Timeout = time.Millisecond * 300\n\t}\n\tif conf.KeepAliveInterval <= 0 {\n\t\tconf.KeepAliveInterval = time.Second * 60\n\t}\n\tif conf.KeepAliveTimeout <= 0 {\n\t\tconf.KeepAliveTimeout = time.Second * 20\n\t}\n\tc.mutex.Lock()\n\tc.conf = conf\n\tc.mutex.Unlock()\n\treturn nil\n}", "func (c *Client) SetConfig(conf *ClientConfig) (err error) {\n\tif conf == nil {\n\t\tconf = _defaultConf\n\t}\n\tif conf.DialTimeout <= 0 {\n\t\tconf.DialTimeout = _defaultConf.DialTimeout\n\t}\n\tif conf.Timeout <= 0 {\n\t\tconf.Timeout = _defaultConf.Timeout\n\t}\n\tif conf.PoolSize <= 0 {\n\t\tconf.PoolSize = _defaultConf.PoolSize\n\t}\n\n\tc.mutex.Lock()\n\tc.conf = conf\n\tif c.breaker == nil {\n\t\tc.breaker = breaker.NewGroup(conf.Breaker)\n\t} else {\n\t\tc.breaker.Reload(conf.Breaker)\n\t}\n\tc.mutex.Unlock()\n\treturn nil\n}", "func SetConfig(c *Config) {\n\tconfig = c\n\treturn\n}", "func SetConfig(conf Config) {\n\tconfig = conf\n}", "func SetConfig(config Config) {\n defConf = config\n DefaultClient().config = config\n}", "func SetConfig(cfg c.Config) {\n\tconfig = cfg\n}", "func SetConfig(config *rest.Config) UpdateSettingsFunc {\n\treturn func(cache *clusterCache) {\n\t\tif !reflect.DeepEqual(cache.config, config) {\n\t\t\tlog.WithField(\"server\", cache.config.Host).Infof(\"Changing cluster config to: %v\", config)\n\t\t\tcache.config = config\n\t\t}\n\t}\n}", "func (bot *VkBot) SetConfig(cfg BotConfig) {\n\tbot.config = cfg\n}", "func (ce *MqttConfigExecutor) StartConfig(config *gateways.ConfigContext) {\n\tce.GatewayConfig.Log.Info().Str(\"config-key\", config.Data.Src).Msg(\"operating on configuration...\")\n\tm, err := parseConfig(config.Data.Config)\n\tif err != nil {\n\t\tconfig.ErrChan <- err\n\t}\n\tce.GatewayConfig.Log.Info().Str(\"config-key\", 
config.Data.Src).Interface(\"config-value\", *m).Msg(\"mqtt configuration\")\n\n\tgo ce.listenEvents(m, config)\n\n\tfor {\n\t\tselect {\n\t\tcase <-config.StartChan:\n\t\t\tconfig.Active = true\n\t\t\tce.GatewayConfig.Log.Info().Str(\"config-key\", config.Data.Src).Msg(\"configuration is running\")\n\n\t\tcase data := <-config.DataChan:\n\t\t\tce.GatewayConfig.DispatchEvent(&gateways.GatewayEvent{\n\t\t\t\tSrc: config.Data.Src,\n\t\t\t\tPayload: data,\n\t\t\t})\n\n\t\tcase <-config.StopChan:\n\t\t\tce.GatewayConfig.Log.Info().Str(\"config-name\", config.Data.Src).Msg(\"stopping configuration\")\n\t\t\tconfig.DoneChan <- struct{}{}\n\t\t\tce.GatewayConfig.Log.Info().Str(\"config-name\", config.Data.Src).Msg(\"configuration stopped\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (a AmbariRegistry) SetConfig(configType string, configKey string, configValue string) {\n\tfilter := Filter{}\n\tfilter.Server = true\n\tfilteredHosts := a.GetFilteredHosts(filter)\n\tversionNote := fmt.Sprintf(\"AMBARICTL - Update config key: %s\", configKey)\n\tcommand := fmt.Sprintf(\"/var/lib/ambari-server/resources/scripts/configs.py --action set -c %s -k %s -v %s \"+\n\t\t\"-u %s -p %s --host=%s --cluster=%s --protocol=%s -b '%s'\", configType, configKey, configValue, a.Username, a.Password,\n\t\ta.Hostname, a.Cluster, a.Protocol, versionNote)\n\ta.RunRemoteHostCommand(command, filteredHosts, filter.Server)\n}", "func SetConfig(cfg config.Config) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tconfig.ToContext(c, cfg)\n\t\tc.Next()\n\t}\n}", "func (p *Plugin) LoadConfig(location string) error {\n\tp.dir = location\n\tlog.Debug(\"Loading configuration from \" + location)\n\treturn nil\n}", "func (s *server) SetConfig(ctx context.Context, req *v1.SetConfigRequest) (*v1.SetConfigReply, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.Debug(\"SetConfig request: %+v\", req)\n\n\treply := &v1.SetConfigReply{}\n\terr := s.setConfigCb(&RawConfig{NodeName: req.NodeName, Data: req.Config})\n\tif 
err != nil {\n\t\treply.Error = fmt.Sprintf(\"failed to apply configuration: %v\", err)\n\t}\n\n\treturn reply, nil\n}", "func (c *TunaSessionClient) SetConfig(conf *Config) error {\n\tc.Lock()\n\tdefer c.Unlock()\n\terr := mergo.Merge(c.config, conf, mergo.WithOverride)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif conf.TunaIPFilter != nil {\n\t\tc.config.TunaIPFilter = conf.TunaIPFilter\n\t}\n\tif conf.TunaNknFilter != nil {\n\t\tc.config.TunaNknFilter = conf.TunaNknFilter\n\t}\n\treturn nil\n}", "func (r *PriRepo) SetConfig(updated *config.Config) error {\n\tbytes, err := json.Marshal(updated)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nconf.SetDSConfig(string(bytes))\n}", "func (policy *PolicySvc) SetConfig(config common.ServiceConfig) error {\n\t// TODO this is a copy-paste of topology service, to refactor\n\tlog.Println(config)\n\tpolicy.config = config\n\t//\tstoreConfig := config.ServiceSpecific[\"store\"].(map[string]interface{})\n\tlog.Printf(\"Policy port: %d\", config.Common.Api.Port)\n\tpolicy.store = policyStore{}\n\tstoreConfig := config.ServiceSpecific[\"store\"].(map[string]interface{})\n\tpolicy.store.ServiceStore = &policy.store\n\treturn policy.store.SetConfig(storeConfig)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TaskConfigSchema returns the schema for the driver configuration of the task.
func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) { return taskConfigSpec, nil }
[ "func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {\n\treturn api.TaskConfigSpec, nil\n}", "func (d *Driver) ConfigSchema() (*hclspec.Spec, error) {\n\treturn configSpec, nil\n}", "func (d *NvidiaDevice) ConfigSchema() (*hclspec.Spec, error) {\n\treturn configSpec, nil\n}", "func GetUserConfigSchema(t string) map[string]interface{} {\n\tif _, ok := getUserConfigurationOptionsSchemaFilenames()[t]; !ok {\n\t\tlog.Panicf(\"user configuration options schema type `%s` is not available\", t)\n\t}\n\n\treturn userConfigSchemas[t]\n}", "func tagMakeConfigSchema(tagAttrName schemaAttr) *schema.Schema {\n\treturn &schema.Schema{\n\t\tType: schema.TypeSet,\n\t\tOptional: true,\n\t\tElem: &schema.Schema{\n\t\t\tType: schema.TypeString,\n\t\t\tValidateFunc: validateTag,\n\t\t},\n\t}\n}", "func BackendSchema(factoryName string) (*config.ConfigurationSchema, error) {\n\tif backendRegistry[factoryName] == nil {\n\t\treturn nil, fmt.Errorf(\"The adapter %s is not registered Processor cannot be created\", factoryName)\n\t}\n\treturn backendRegistry[factoryName].configurationSchema, nil\n}", "func (*GetTaskSchema) Descriptor() ([]byte, []int) {\n\treturn file_odpf_optimus_task_plugin_proto_rawDescGZIP(), []int{1}\n}", "func (o TableExternalDataConfigurationOutput) Schema() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TableExternalDataConfiguration) *string { return v.Schema }).(pulumi.StringPtrOutput)\n}", "func ImageConfigSchema() *gojsonschema.Schema {\n\treturn loadSchema(\"image-config.schema.json\")\n}", "func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {\n\treturn j.jobConfig.TaskConfig\n}", "func (*TaskConfigs_Config) Descriptor() ([]byte, []int) {\n\treturn file_odpf_optimus_task_plugin_proto_rawDescGZIP(), []int{6, 0}\n}", "func autopilotConfigTableSchema() *memdb.TableSchema {\n\treturn &memdb.TableSchema{\n\t\tName: \"autopilot-config\",\n\t\tIndexes: map[string]*memdb.IndexSchema{\n\t\t\t\"id\": &memdb.IndexSchema{\n\t\t\t\tName: 
\"id\",\n\t\t\t\tAllowMissing: true,\n\t\t\t\tUnique: true,\n\t\t\t\tIndexer: &memdb.ConditionalIndex{\n\t\t\t\t\tConditional: func(obj interface{}) (bool, error) { return true, nil },\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func TaskConfiguration() TaskConfig {\n\treturn conf.TaskConfig\n}", "func DeploymentSchema() string {\n\treturn deploymentSchema\n}", "func GetClusterConfigSchema(extensionSchema string) string {\n\tvar clusterConfigSchema string\n\tif clusterConfigSchema == \"\" {\n\t\tclusterConfigSchema = fmt.Sprintf(ClusterConfigSpecSchemaTemplate, \"\")\n\t} else {\n\t\tclusterConfigSchema = fmt.Sprintf(ClusterConfigSpecSchemaTemplate, \",\"+extensionSchema)\n\t}\n\treturn fmt.Sprintf(V2SchemaTemplate, MetadataSchema, clusterConfigSchema, DefaultDefinitions)\n}", "func (*TaskConfigs) Descriptor() ([]byte, []int) {\n\treturn file_odpf_optimus_task_plugin_proto_rawDescGZIP(), []int{6}\n}", "func (t *Templates) GetSchema() ([]byte, error) {\n\tif t.Config.IsFeatureEnabled(experimental.FlagDependenciesV2) {\n\t\treturn t.fs.ReadFile(\"templates/v1.1.0.schema.json\")\n\t}\n\treturn t.fs.ReadFile(\"templates/schema.json\")\n}", "func (m modelPluginTestDevice) Schema() (map[string]*yang.Entry, error) {\n\treturn td1.UnzipSchema()\n}", "func (gr *GenericRecord) Schema() Schema {\n\treturn gr.schema\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fingerprint is called by the client when the plugin is started. It allows the driver to indicate its health to the client. The channel returned should immediately send an initial Fingerprint, then send periodic updates at an interval that is appropriate for the driver until the context is canceled.
func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) { err := shelpers.Init() if err != nil { d.logger.Error("Could not init stats helper", "err", err) return nil, err } ch := make(chan *drivers.Fingerprint) go d.handleFingerprint(ctx, ch) return ch, nil }
[ "func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {\n\tch := make(chan *drivers.Fingerprint)\n\tgo d.handleFingerprint(ctx, ch)\n\treturn ch, nil\n}", "func (d *NvidiaDevice) Fingerprint(ctx context.Context) (<-chan *device.FingerprintResponse, error) {\n\toutCh := make(chan *device.FingerprintResponse)\n\tgo d.fingerprint(ctx, outCh)\n\treturn outCh, nil\n}", "func (d *Driver) handleFingerprint(ctx context.Context, ch chan<- *drivers.Fingerprint) {\n\td.logger.Debug(\"handleFingerprint called\")\n\tdefer close(ch)\n\n\t// Nomad expects the initial fingerprint to be sent immediately\n\tticker := time.NewTimer(0)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-d.ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\t// after the initial fingerprint we can set the proper fingerprint\n\t\t\t// period\n\t\t\tticker.Reset(fingerprintPeriod)\n\t\t\tch <- d.buildFingerprint()\n\t\t}\n\t}\n}", "func (i *instanceManager) handleFingerprint(fp *drivers.Fingerprint) {\n\tattrs := make(map[string]string, len(fp.Attributes))\n\tfor key, attr := range fp.Attributes {\n\t\tattrs[key] = attr.GoString()\n\t}\n\tdi := &structs.DriverInfo{\n\t\tAttributes: attrs,\n\t\tDetected: fp.Health != drivers.HealthStateUndetected,\n\t\tHealthy: fp.Health == drivers.HealthStateHealthy,\n\t\tHealthDescription: fp.HealthDescription,\n\t\tUpdateTime: time.Now(),\n\t}\n\ti.updateNodeFromDriver(i.id.Name, di)\n\n\t// log detected/undetected state changes after the initial fingerprint\n\ti.lastHealthStateMu.Lock()\n\tif i.hasFingerprinted {\n\t\tif i.lastHealthState != fp.Health {\n\t\t\ti.logger.Info(\"driver health state has changed\", \"previous\", i.lastHealthState, \"current\", fp.Health, \"description\", fp.HealthDescription)\n\t\t}\n\t}\n\ti.lastHealthState = fp.Health\n\ti.lastHealthStateMu.Unlock()\n\n\t// if this is the first fingerprint, mark that we have received it\n\tif !i.hasFingerprinted {\n\t\ti.logger.Debug(\"initial 
driver fingerprint\", \"health\", fp.Health, \"description\", fp.HealthDescription)\n\t\tclose(i.firstFingerprintCh)\n\t\ti.hasFingerprinted = true\n\t}\n}", "func (fm *FingerprintManager) dispenseDriverFingerprint(driverName string) (<-chan *drivers.Fingerprint, context.CancelFunc, error) {\n\tplug, err := fm.singletonLoader.Dispense(driverName, base.PluginTypeDriver, fm.getConfig().NomadPluginConfig(), fm.logger)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdriver, ok := plug.Plugin().(drivers.DriverPlugin)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"registered driver plugin %q does not implement DriverPlugin interface\", driverName)\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tfingerCh, err := driver.Fingerprint(ctx)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, nil, err\n\t}\n\n\treturn fingerCh, cancel, nil\n}", "func (i *instanceManager) dispenseFingerprintCh() (<-chan *drivers.Fingerprint, context.CancelFunc, error) {\n\tdriver, err := i.dispense()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(i.ctx)\n\tfingerCh, err := driver.Fingerprint(ctx)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, nil, err\n\t}\n\n\treturn fingerCh, cancel, nil\n}", "func (i *instanceManager) WaitForFirstFingerprint(ctx context.Context) {\n\tselect {\n\tcase <-i.ctx.Done():\n\tcase <-ctx.Done():\n\tcase <-i.firstFingerprintCh:\n\t}\n}", "func (d *Driver) buildFingerprint() *drivers.Fingerprint {\n\tfingerprint := &drivers.Fingerprint{\n\t\tAttributes: map[string]*pstructs.Attribute{},\n\t\tHealth: drivers.HealthStateHealthy,\n\t\tHealthDescription: drivers.DriverHealthy,\n\t}\n\n\t// TODO: implement fingerprinting logic to populate health and driver\n\t// attributes.\n\t//\n\t// Fingerprinting is used by the plugin to relay two important information\n\t// to Nomad: health state and node attributes.\n\t//\n\t// If the plugin reports to be unhealthy, or doesn't send any fingerprint\n\t// data in the 
expected interval of time, Nomad will restart it.\n\t//\n\t// Node attributes can be used to report any relevant information about\n\t// the node in which the plugin is running (specific library availability,\n\t// installed versions of a software etc.). These attributes can then be\n\t// used by an operator to set job constrains.\n\t//\n\t// In the example below we check if the libvirt executive binary specified by the user exists\n\t// in the node.\n\tbinary := \"libvirtd\"\n\t// TODO: Need know node attributes\n\t// find specific d.config.hypervisor is present, like qemu -> 'qemu-system-x86_64'\n\n\tcmd := exec.Command(\"which\", binary)\n\tif err := cmd.Run(); err != nil {\n\t\treturn &drivers.Fingerprint{\n\t\t\tHealth: drivers.HealthStateUndetected,\n\t\t\tHealthDescription: fmt.Sprintf(\"libvirt %s not found\", binary),\n\t\t}\n\t}\n\n\t// We also set the libvirt and its version as attributes\n\tcmd = exec.Command(binary, \"--version\")\n\tif out, err := cmd.Output(); err != nil {\n\t\td.logger.Warn(\"failed to find libvirt version: %v\", err)\n\t} else {\n\t\tre := regexp.MustCompile(\"[0-9]\\\\.[0-9]\\\\.[0-9]\")\n\t\tversion := re.FindString(string(out))\n\n\t\tfingerprint.Attributes[driverVersionAttr] = structs.NewStringAttribute(version)\n\t\tfingerprint.Attributes[driverAttr] = structs.NewStringAttribute(binary)\n\t}\n\n\tif d.domainManager == nil || d.domainManager.IsManagerAlive() == false {\n\t\tfingerprint.Health = drivers.HealthStateUnhealthy\n\t\tfingerprint.HealthDescription = \"no libvirt connection\"\n\t}\n\treturn fingerprint\n}", "func (m *manager) fingerprint() {\n\tfor {\n\t\tselect {\n\t\tcase <-m.ctx.Done():\n\t\t\treturn\n\t\tcase <-m.fingerprintResCh:\n\t\t}\n\n\t\t// Collect the data\n\t\tvar fingerprinted []*device.DeviceGroup\n\t\tfor _, i := range m.instances {\n\t\t\tfingerprinted = append(fingerprinted, i.Devices()...)\n\t\t}\n\n\t\t// Convert and update\n\t\tout := make([]*structs.NodeDeviceResource, len(fingerprinted))\n\t\tfor 
i, f := range fingerprinted {\n\t\t\tout[i] = convertDeviceGroup(f)\n\t\t}\n\n\t\t// Call the updater\n\t\tm.updater(out)\n\t}\n}", "func (fm *FingerprintManager) processDriverFingerprint(fp *drivers.Fingerprint, driverName string) {\n\tdi := &structs.DriverInfo{\n\t\tAttributes: fp.Attributes,\n\t\tDetected: fp.Health != drivers.HealthStateUndetected,\n\t\tHealthy: fp.Health == drivers.HealthStateHealthy,\n\t\tHealthDescription: fp.HealthDescription,\n\t\tUpdateTime: time.Now(),\n\t}\n\tif n := fm.updateNodeFromDriver(driverName, di); n != nil {\n\t\tfm.setNode(n)\n\t}\n}", "func TestDriverManager_Fingerprint_Run(t *testing.T) {\n\tci.Parallel(t)\n\n\ttestClient, cleanup := TestClient(t, nil)\n\tdefer cleanup()\n\n\tconf := testClient.GetConfig()\n\tdm := drivermanager.New(&drivermanager.Config{\n\t\tLogger: testClient.logger,\n\t\tLoader: conf.PluginSingletonLoader,\n\t\tPluginConfig: conf.NomadPluginConfig(),\n\t\tUpdater: testClient.updateNodeFromDriver,\n\t\tEventHandlerFactory: testClient.GetTaskEventHandler,\n\t\tState: testClient.stateDB,\n\t})\n\n\tgo dm.Run()\n\tdefer dm.Shutdown()\n\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tnode := testClient.Node()\n\n\t\td, ok := node.Drivers[\"mock_driver\"]\n\t\tif !ok {\n\t\t\treturn false, fmt.Errorf(\"mock_driver driver is not present: %+v\", node.Drivers)\n\t\t}\n\n\t\tif !d.Detected || !d.Healthy {\n\t\t\treturn false, fmt.Errorf(\"mock_driver driver is not marked healthy: %+v\", d)\n\t\t}\n\n\t\treturn true, nil\n\t}, func(err error) {\n\t\trequire.NoError(t, err)\n\t})\n}", "func (fm *FingerprintManager) runFingerprint(f fingerprint.Fingerprint, period time.Duration, name string) {\n\tfm.logger.Debug(\"fingerprinting periodically\", \"fingerprinter\", name, \"period\", period)\n\n\ttimer := time.NewTimer(period)\n\tdefer timer.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\ttimer.Reset(period)\n\n\t\t\t_, err := fm.fingerprint(name, f)\n\t\t\tif err != nil 
{\n\t\t\t\tfm.logger.Debug(\"error periodic fingerprinting\", \"error\", err, \"fingerprinter\", name)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase <-fm.shutdownCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (c *AccountsUpdateCall) Fingerprint(fingerprint string) *AccountsUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func TestDriverManager_Fingerprint_Periodic(t *testing.T) {\n\tci.Parallel(t)\n\n\ttestClient, cleanup := TestClient(t, func(c *config.Config) {\n\t\tpluginConfig := []*nconfig.PluginConfig{\n\t\t\t{\n\t\t\t\tName: \"mock_driver\",\n\t\t\t\tConfig: map[string]interface{}{\n\t\t\t\t\t\"shutdown_periodic_after\": true,\n\t\t\t\t\t\"shutdown_periodic_duration\": 2 * time.Second,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tc.PluginLoader = catalog.TestPluginLoaderWithOptions(t, \"\", map[string]string{}, pluginConfig)\n\n\t})\n\tdefer cleanup()\n\n\tconf := testClient.GetConfig()\n\tdm := drivermanager.New(&drivermanager.Config{\n\t\tLogger: testClient.logger,\n\t\tLoader: conf.PluginSingletonLoader,\n\t\tPluginConfig: conf.NomadPluginConfig(),\n\t\tUpdater: testClient.updateNodeFromDriver,\n\t\tEventHandlerFactory: testClient.GetTaskEventHandler,\n\t\tState: testClient.stateDB,\n\t})\n\n\tgo dm.Run()\n\tdefer dm.Shutdown()\n\n\t// we get a healthy mock_driver first\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tnode := testClient.Node()\n\n\t\td, ok := node.Drivers[\"mock_driver\"]\n\t\tif !ok {\n\t\t\treturn false, fmt.Errorf(\"mock_driver driver is not present: %+v\", node.Drivers)\n\t\t}\n\n\t\tif !d.Detected || !d.Healthy {\n\t\t\treturn false, fmt.Errorf(\"mock_driver driver is not marked healthy: %+v\", d)\n\t\t}\n\n\t\treturn true, nil\n\t}, func(err error) {\n\t\trequire.NoError(t, err)\n\t})\n\n\t// eventually, the mock_driver is marked as unhealthy\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tnode := testClient.Node()\n\n\t\td, ok := node.Drivers[\"mock_driver\"]\n\t\tif !ok {\n\t\t\treturn false, 
fmt.Errorf(\"mock_driver driver is not present: %+v\", node.Drivers)\n\t\t}\n\n\t\tif d.Detected || d.Healthy {\n\t\t\treturn false, fmt.Errorf(\"mock_driver driver is still marked as healthy: %+v\", d)\n\t\t}\n\n\t\treturn true, nil\n\t}, func(err error) {\n\t\trequire.NoError(t, err)\n\t})\n}", "func (f *CGroupFingerprint) Fingerprint(req *FingerprintRequest, resp *FingerprintResponse) error {\n\tmount, err := f.mountPointDetector.MountPoint()\n\tif err != nil {\n\t\tf.clearCGroupAttributes(resp)\n\t\treturn fmt.Errorf(\"failed to discover cgroup mount point: %s\", err)\n\t}\n\n\t// Check if a cgroup mount point was found.\n\tif mount == \"\" {\n\t\tf.clearCGroupAttributes(resp)\n\t\tif f.lastState == cgroupAvailable {\n\t\t\tf.logger.Warn(\"cgroups are now unavailable\")\n\t\t}\n\t\tf.lastState = cgroupUnavailable\n\t\treturn nil\n\t}\n\n\t// Check the version in use.\n\tversion := f.versionDetector.CgroupVersion()\n\n\tresp.AddAttribute(cgroupMountPointAttribute, mount)\n\tresp.AddAttribute(cgroupVersionAttribute, version)\n\tresp.Detected = true\n\n\tif f.lastState == cgroupUnavailable {\n\t\tf.logger.Info(\"cgroups are available\")\n\t}\n\tf.lastState = cgroupAvailable\n\treturn nil\n}", "func Fingerprint(scope *Scope, data tf.Output, method tf.Output) (fingerprint tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"Fingerprint\",\n\t\tInput: []tf.Input{\n\t\t\tdata, method,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (c *AccountsContainersMacrosUpdateCall) Fingerprint(fingerprint string) *AccountsContainersMacrosUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func (c *AccountsContainersTriggersUpdateCall) Fingerprint(fingerprint string) *AccountsContainersTriggersUpdateCall {\n\tc.params_.Set(\"fingerprint\", fmt.Sprintf(\"%v\", fingerprint))\n\treturn c\n}", "func (u *User) RegisterFingerprint(fp string) 
(map[string]interface{}, error) {\n\tlog.info(\"========== REGISTER FINGERPRINT ==========\")\n\turl := buildURL(path[\"auth\"], u.UserID)\n\n\tu.request.fingerprint = fp\n\n\tdata := `{ \"refresh_token\": \"` + u.RefreshToken + `\" }`\n\n\tres, err := u.do(\"POST\", url, data, nil)\n\n\treturn res, err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RecoverTask detects running tasks when nomad client or task driver is restarted. When a driver is restarted it is not expected to persist any internal state to disk. To support this, Nomad will attempt to recover a task that was previously started if the driver does not recognize the task ID. During task recovery, Nomad calls RecoverTask passing the TaskHandle that was returned by the StartTask function.
func (d *Driver) RecoverTask(handle *drivers.TaskHandle) error { if handle == nil { return fmt.Errorf("error: handle cannot be nil") } if _, ok := d.tasks.Get(handle.Config.ID); ok { return nil } var taskState TaskState if err := handle.GetDriverState(&taskState); err != nil { return fmt.Errorf("failed to decode task state from handle: %v", err) } d.logger.Debug("Checking for recoverable task", "task", handle.Config.Name, "taskid", handle.Config.ID, "container", taskState.ContainerID) inspectData, err := d.podman.ContainerInspect(d.ctx, taskState.ContainerID) if err != nil { d.logger.Warn("Recovery lookup failed", "task", handle.Config.ID, "container", taskState.ContainerID, "err", err) return nil } h := &TaskHandle{ containerID: taskState.ContainerID, driver: d, taskConfig: taskState.TaskConfig, procState: drivers.TaskStateUnknown, startedAt: taskState.StartedAt, exitResult: &drivers.ExitResult{}, logger: d.logger.Named("podmanHandle"), totalCPUStats: stats.NewCpuStats(), userCPUStats: stats.NewCpuStats(), systemCPUStats: stats.NewCpuStats(), removeContainerOnExit: d.config.GC.Container, } if inspectData.State.Running { d.logger.Info("Recovered a still running container", "container", inspectData.State.Pid) h.procState = drivers.TaskStateRunning } else if inspectData.State.Status == "exited" { // are we allowed to restart a stopped container? 
if d.config.RecoverStopped { d.logger.Debug("Found a stopped container, try to start it", "container", inspectData.State.Pid) if err = d.podman.ContainerStart(d.ctx, inspectData.ID); err != nil { d.logger.Warn("Recovery restart failed", "task", handle.Config.ID, "container", taskState.ContainerID, "err", err) } else { d.logger.Info("Restarted a container during recovery", "container", inspectData.ID) h.procState = drivers.TaskStateRunning } } else { // no, let's cleanup here to prepare for a StartTask() d.logger.Debug("Found a stopped container, removing it", "container", inspectData.ID) if err = d.podman.ContainerStart(d.ctx, inspectData.ID); err != nil { d.logger.Warn("Recovery cleanup failed", "task", handle.Config.ID, "container", inspectData.ID) } h.procState = drivers.TaskStateExited } } else { d.logger.Warn("Recovery restart failed, unknown container state", "state", inspectData.State.Status, "container", taskState.ContainerID) h.procState = drivers.TaskStateUnknown } d.tasks.Set(taskState.TaskConfig.ID, h) go h.runContainerMonitor() d.logger.Debug("Recovered container handle", "container", taskState.ContainerID) return nil }
[ "func (d *Driver) RecoverTask(handle *drivers.TaskHandle) error {\n\tif handle == nil {\n\t\treturn fmt.Errorf(\"error: handle cannot be nil in RecoverTask\")\n\t}\n\n\tif _, ok := d.tasks.Get(handle.Config.ID); ok {\n\t\t// nothing to do if handle found in task store\n\t\treturn nil\n\t}\n\n\tvar taskState TaskState\n\tif err := handle.GetDriverState(&taskState); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode driver task state: %v\", err)\n\t}\n\n\tvar driverConfig api.TaskConfig\n\tif err := taskState.TaskConfig.DecodeDriverConfig(&driverConfig); err != nil {\n\t\treturn fmt.Errorf(\"failed to decode driver config: %v\", err)\n\t}\n\n\t// create new handle from restored state from state db\n\t// libvirt doesn't track the creation/completion time of domains\n\t// so I'm tracking those myself\n\th := &taskHandle{\n\t\tdomainManager: d.domainManager,\n\t\tresultChan: make(chan *drivers.ExitResult),\n\t\ttask: handle.Config,\n\t\tstartedAt: taskState.startedAt,\n\t\tcompletedAt: taskState.completedAt,\n\t\texitResult: taskState.exitResult,\n\t}\n\n\t// set the in memory handle in task store\n\td.tasks.Set(handle.Config.ID, h)\n\n\treturn nil\n}", "func (s *Schedule) Recover() {\n\n\tif s.standByMode {\n\t\tfor _, t := range s.tasks {\n\t\t\tif interval, ok := s.cachedInterval[t.Name]; ok {\n\t\t\t\tt.interval = interval\n\t\t\t}\n\t\t\t// reset timer of the critical task, assume that it just completed\n\t\t\tif t.Name == s.standByTask.Name {\n\t\t\t\tt.timer = time.Now()\n\t\t\t\t// all the other tasks that were suspended need to run asap\n\t\t\t} else {\n\t\t\t\tt.timer = time.Now().Add(-t.interval)\n\t\t\t}\n\t\t}\n\t\t//s.cachedInterval = nil\n\t\ts.standByTask = nil\n\t\ts.standByMode = false\n\t\treturn\n\t}\n\tpanic(\"recover in non-standByMode\")\n}", "func (worker *WorkerClient) Recover(p *WorkerPool, ticket string, info *strandedCalInfo, param ...int) {\n\tif atomic.CompareAndSwapInt32(&worker.isUnderRecovery, 0, 1) {\n\t\tif 
logger.GetLogger().V(logger.Debug) {\n\t\t\tlogger.GetLogger().Log(logger.Debug, \"begin recover worker: \", worker.pid)\n\t\t}\n\t} else {\n\t\tif logger.GetLogger().V(logger.Debug) {\n\t\t\tlogger.GetLogger().Log(logger.Debug, \"worker already underrecovery: \", worker.pid)\n\t\t}\n\t\t//\n\t\t// defer will not be called.\n\t\t//\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif atomic.CompareAndSwapInt32(&worker.isUnderRecovery, 1, 0) {\n\t\t\tif logger.GetLogger().V(logger.Verbose) {\n\t\t\t\tlogger.GetLogger().Log(logger.Verbose, \"done recover worker: \", worker.pid)\n\t\t\t}\n\t\t} else {\n\t\t\t//\n\t\t\t// not possible. log in case.\n\t\t\t//\n\t\t\tif logger.GetLogger().V(logger.Warning) {\n\t\t\t\tlogger.GetLogger().Log(logger.Warning, \"exit recover worker (isUnderRecovery was 0 during a recovery): \", worker.pid)\n\t\t\t}\n\t\t}\n\t}()\n\tif worker.Status == wsAcpt {\n\t\tif logger.GetLogger().V(logger.Info) {\n\t\t\tlogger.GetLogger().Log(logger.Info, \"will not recover an idle worker\", worker.pid)\n\t\t}\n\t\treturn\n\t}\n\tpriorWorkerStatus := worker.Status\n\tworker.setState(wsQuce)\n\tkillparam := common.StrandedClientClose\n\tif len(param) > 0 {\n\t\tkillparam = param[0]\n\t}\n\tworker.callogStranded(\"RECOVERING\", info) // TODO: should we have this?\n\tworkerRecoverTimeout := worker.initiateRecover(killparam, p, priorWorkerStatus)\n\tfor {\n\t\tselect {\n\t\tcase <-workerRecoverTimeout:\n\t\t\tworker.thr.CanRun()\n\t\t\tworker.setState(wsInit) // Set the worker state to INIT when we decide to Terminate the worker\n\t\t\tworker.Terminate()\n\t\t\tworker.callogStranded(\"RECYCLED\", info)\n\t\t\treturn\n\t\tcase msg, ok := <-worker.channel():\n\t\t\tif !ok {\n\t\t\t\tif logger.GetLogger().V(logger.Debug) {\n\t\t\t\t\tlogger.GetLogger().Log(logger.Debug, \"Recover: worker closed, exiting\")\n\t\t\t\t}\n\t\t\t\tworker.callogStranded(\"EXITED\", info)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif logger.GetLogger().V(logger.Verbose) 
{\n\t\t\t\tlogger.GetLogger().Log(logger.Verbose, \"Recover (<<<worker pid = \", worker.pid, \"): \", msg.free, len(msg.data))\n\t\t\t}\n\t\t\t//\n\t\t\t// to avoid infinite loop ignore if worker asks for a restart again.\n\t\t\t//\n\t\t\tif msg.free {\n\t\t\t\tif msg.rqId != worker.rqId {\n\t\t\t\t\tif logger.GetLogger().V(logger.Verbose) {\n\t\t\t\t\t\tlogger.GetLogger().Log(logger.Verbose, \"worker pid <<<<\", worker.pid, \">>>>req id of worker:\", msg.rqId, \" and req id of mux:\", worker.rqId, \" does not match, Skip the EOR\")\n\t\t\t\t\t}\n\t\t\t\t\tevname := \"rrqId\"\n\t\t\t\t\tif (msg.rqId > worker.rqId) && ((worker.rqId > 10000) || (msg.rqId < 10000) /*rqId can wrap around to 0, this test checks that it did not just wrap*/) {\n\t\t\t\t\t\t// this is not expected, so log with different name\n\t\t\t\t\t\tevname = \"rrqId_Error\"\n\t\t\t\t\t}\n\t\t\t\t\te := cal.NewCalEvent(\"WARNING\", evname, cal.TransOK, \"\")\n\t\t\t\t\te.AddDataInt(\"mux\", int64(worker.rqId))\n\t\t\t\t\te.AddDataInt(\"wk\", int64(msg.rqId))\n\t\t\t\t\te.Completed()\n\t\t\t\t\t// don't return yet, we expect another EOR message, matching the rqId\n\t\t\t\t} else {\n\t\t\t\t\tif logger.GetLogger().V(logger.Info) {\n\t\t\t\t\t\tlogger.GetLogger().Log(logger.Info, \"stranded conn recovered\", worker.Type, worker.pid)\n\t\t\t\t\t}\n\t\t\t\t\tworker.callogStranded(\"RECOVERED\", info)\n\n\t\t\t\t\tworker.setState(wsFnsh)\n\t\t\t\t\tp.ReturnWorker(worker, ticket)\n\t\t\t\t\t//\n\t\t\t\t\t// donot set state to ACPT since worker could already be picked up by another\n\t\t\t\t\t// client into wsBusy, if that worker ends up recovering, it will not finish\n\t\t\t\t\t// recovery because of ACPT state. 
that worker will never get back to the pool\n\t\t\t\t\t//\n\t\t\t\t\t//worker.setState(ACPT)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func recoverTasks(\n\tctx context.Context,\n\tjobID *peloton.JobID,\n\tjobConfig *job.JobConfig,\n\ttaskInfos map[uint32]*task.TaskInfo,\n\tgoalStateDriver *driver) error {\n\tvar tasks []*task.TaskInfo\n\n\tcachedJob := goalStateDriver.jobFactory.AddJob(jobID)\n\tmaxRunningInstances := jobConfig.GetSLA().GetMaximumRunningInstances()\n\ttaskRuntimeInfoMap := make(map[uint32]*task.RuntimeInfo)\n\tfor i := uint32(0); i < jobConfig.InstanceCount; i++ {\n\t\tif _, ok := taskInfos[i]; ok {\n\t\t\ttaskInfo := &task.TaskInfo{\n\t\t\t\tJobId: jobID,\n\t\t\t\tInstanceId: i,\n\t\t\t\tRuntime: taskInfos[i].GetRuntime(),\n\t\t\t\tConfig: taskconfig.Merge(jobConfig.GetDefaultConfig(), jobConfig.GetInstanceConfig()[i]),\n\t\t\t}\n\n\t\t\tif taskInfos[i].GetRuntime().GetState() == task.TaskState_INITIALIZED {\n\t\t\t\t// Task exists, just send to resource manager\n\t\t\t\tif maxRunningInstances > 0 && taskInfos[i].GetRuntime().GetState() == task.TaskState_INITIALIZED {\n\t\t\t\t\t// add task to cache if not already present\n\t\t\t\t\tif cachedJob.GetTask(i) == nil {\n\t\t\t\t\t\tcachedJob.ReplaceTasks(\n\t\t\t\t\t\t\tmap[uint32]*task.TaskInfo{i: taskInfo},\n\t\t\t\t\t\t\tfalse,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t\t// run the runtime updater to start instances\n\t\t\t\t\tEnqueueJobWithDefaultDelay(\n\t\t\t\t\t\tjobID, goalStateDriver, cachedJob)\n\t\t\t\t} else {\n\t\t\t\t\ttasks = append(tasks, taskInfo)\n\t\t\t\t\t// add task to cache if not already present\n\t\t\t\t\tif cachedJob.GetTask(i) == nil {\n\t\t\t\t\t\treplaceTaskInfo := make(map[uint32]*task.TaskInfo)\n\t\t\t\t\t\treplaceTaskInfo[i] = taskInfo\n\t\t\t\t\t\tcachedJob.ReplaceTasks(taskInfos, false)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Task does not exist in taskStore, create runtime and then send to resource 
manager\n\t\tlog.WithField(\"job_id\", jobID.GetValue()).\n\t\t\tWithField(\"task_instance\", i).\n\t\t\tInfo(\"Creating missing task\")\n\n\t\truntime := jobmgr_task.CreateInitializingTask(jobID, i, jobConfig)\n\t\ttaskRuntimeInfoMap[i] = runtime\n\n\t\tif maxRunningInstances == 0 {\n\t\t\ttaskInfo := &task.TaskInfo{\n\t\t\t\tJobId: jobID,\n\t\t\t\tInstanceId: i,\n\t\t\t\tRuntime: runtime,\n\t\t\t\tConfig: taskconfig.Merge(jobConfig.GetDefaultConfig(), jobConfig.GetInstanceConfig()[i]),\n\t\t\t}\n\t\t\ttasks = append(tasks, taskInfo)\n\t\t}\n\t}\n\n\tif err := cachedJob.CreateTaskRuntimes(ctx, taskRuntimeInfoMap, jobConfig.OwningTeam); err != nil {\n\t\tlog.WithError(err).\n\t\t\tWithField(\"job_id\", jobID.GetValue()).\n\t\t\tError(\"failed to create runtime for tasks\")\n\t\treturn err\n\t}\n\n\tif maxRunningInstances > 0 {\n\t\t// run the runtime updater to start instances\n\t\tEnqueueJobWithDefaultDelay(jobID, goalStateDriver, cachedJob)\n\t}\n\n\treturn sendTasksToResMgr(ctx, jobID, tasks, jobConfig, goalStateDriver)\n}", "func resetTask(ctx context.Context, taskId, caller string) error {\n\tt, err := task.FindOneId(taskId)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tif t.IsPartOfDisplay() {\n\t\treturn errors.Errorf(\"cannot restart execution task '%s' because it is part of a display task\", t.Id)\n\t}\n\tif err = t.Archive(); err != nil {\n\t\treturn errors.Wrap(err, \"can't restart task because it can't be archived\")\n\t}\n\n\tif err = MarkOneTaskReset(ctx, t); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tevent.LogTaskRestarted(t.Id, t.Execution, caller)\n\n\tif err := t.ActivateTask(caller); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn errors.WithStack(UpdateBuildAndVersionStatusForTask(ctx, t))\n}", "func TryResetTask(ctx context.Context, settings *evergreen.Settings, taskId, user, origin string, detail *apimodels.TaskEndDetail) error {\n\tt, err := task.FindOneId(taskId)\n\tif err != nil {\n\t\treturn 
errors.WithStack(err)\n\t}\n\tif t == nil {\n\t\treturn errors.Errorf(\"cannot restart task '%s' because it could not be found\", taskId)\n\t}\n\tif t.IsPartOfDisplay() {\n\t\treturn errors.Errorf(\"cannot restart execution task '%s' because it is part of a display task\", t.Id)\n\t}\n\n\tvar execTask *task.Task\n\n\tmaxExecution := evergreen.MaxTaskExecution\n\n\tif evergreen.IsCommitQueueRequester(t.Requester) && evergreen.IsSystemFailedTaskStatus(t.Status) {\n\t\tmaxSystemFailedTaskRetries := settings.CommitQueue.MaxSystemFailedTaskRetries\n\t\tif maxSystemFailedTaskRetries > 0 {\n\t\t\tmaxExecution = maxSystemFailedTaskRetries\n\t\t}\n\t}\n\t// if we've reached the max number of executions for this task, mark it as finished and failed\n\tif t.Execution >= maxExecution {\n\t\t// restarting from the UI bypasses the restart cap\n\t\tmsg := fmt.Sprintf(\"task '%s' reached max execution %d: \", t.Id, maxExecution)\n\t\tif origin == evergreen.UIPackage || origin == evergreen.RESTV2Package {\n\t\t\tgrip.Debugln(msg, \"allowing exception for\", user)\n\t\t} else if !t.IsFinished() {\n\t\t\tif detail != nil {\n\t\t\t\tgrip.Debugln(msg, \"marking as failed\")\n\t\t\t\tif t.DisplayOnly {\n\t\t\t\t\tfor _, etId := range t.ExecutionTasks {\n\t\t\t\t\t\texecTask, err = task.FindOneId(etId)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn errors.Wrap(err, \"finding execution task\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif err = MarkEnd(ctx, settings, execTask, origin, time.Now(), detail, false); err != nil {\n\t\t\t\t\t\t\treturn errors.Wrap(err, \"marking execution task as ended\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn errors.WithStack(MarkEnd(ctx, settings, t, origin, time.Now(), detail, false))\n\t\t\t} else {\n\t\t\t\tgrip.Critical(message.Fields{\n\t\t\t\t\t\"message\": \"TryResetTask called with nil TaskEndDetail\",\n\t\t\t\t\t\"origin\": origin,\n\t\t\t\t\t\"task_id\": taskId,\n\t\t\t\t\t\"task_status\": t.Status,\n\t\t\t\t})\n\t\t\t}\n\t\t} else 
{\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// only allow re-execution for failed or successful tasks\n\tif !t.IsFinished() {\n\t\t// this is to disallow terminating running tasks via the UI\n\t\tif origin == evergreen.UIPackage || origin == evergreen.RESTV2Package {\n\t\t\tgrip.Debugf(\"Unsatisfiable '%s' reset request on '%s' (status: '%s')\",\n\t\t\t\tuser, t.Id, t.Status)\n\t\t\tif t.DisplayOnly {\n\t\t\t\texecTasks := map[string]string{}\n\t\t\t\tfor _, et := range t.ExecutionTasks {\n\t\t\t\t\texecTask, err = task.FindOneId(et)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\texecTasks[execTask.Id] = execTask.Status\n\t\t\t\t}\n\t\t\t\tgrip.Error(message.Fields{\n\t\t\t\t\t\"message\": \"attempt to restart unfinished display task\",\n\t\t\t\t\t\"task\": t.Id,\n\t\t\t\t\t\"status\": t.Status,\n\t\t\t\t\t\"exec_tasks\": execTasks,\n\t\t\t\t})\n\t\t\t}\n\t\t\treturn errors.Errorf(\"task '%s' currently has status '%s' - cannot reset task in this status\",\n\t\t\t\tt.Id, t.Status)\n\t\t}\n\t}\n\n\tif detail != nil {\n\t\tif err = t.MarkEnd(time.Now(), detail); err != nil {\n\t\t\treturn errors.Wrap(err, \"marking task as ended\")\n\t\t}\n\t}\n\n\tcaller := origin\n\tif origin == evergreen.UIPackage || origin == evergreen.RESTV2Package {\n\t\tcaller = user\n\t}\n\tif t.IsPartOfSingleHostTaskGroup() {\n\t\tif err = t.SetResetWhenFinished(); err != nil {\n\t\t\treturn errors.Wrap(err, \"marking task group for reset\")\n\t\t}\n\t\treturn errors.Wrap(checkResetSingleHostTaskGroup(ctx, t, caller), \"resetting single host task group\")\n\t}\n\n\treturn errors.WithStack(resetTask(ctx, t.Id, caller))\n}", "func (s *RealRaftStateManager) Recover() error {\n\terrorChannel := make(chan error)\n\ts.commandChannel <- &raftStateManagerRecover{errorChan: errorChannel}\n\treturn <-errorChannel\n}", "func MTTRecover() int64 {\n\treturn 0\n}", "func (broadcast *Broadcast) Recover(ctx context.Context, username, newResetPubKeyHex,\n\tnewTransactionPubKeyHex, 
newAppPubKeyHex, privKeyHex string, seq int64) (*model.BroadcastResponse, error) {\n\tresetPubKey, err := transport.GetPubKeyFromHex(newResetPubKeyHex)\n\tif err != nil {\n\t\treturn nil, errors.FailedToGetPubKeyFromHexf(\"Recover: failed to get Reset pub key\").AddCause(err)\n\t}\n\ttxPubKey, err := transport.GetPubKeyFromHex(newTransactionPubKeyHex)\n\tif err != nil {\n\t\treturn nil, errors.FailedToGetPubKeyFromHexf(\"Recover: failed to get Tx pub key\").AddCause(err)\n\t}\n\tappPubKey, err := transport.GetPubKeyFromHex(newAppPubKeyHex)\n\tif err != nil {\n\t\treturn nil, errors.FailedToGetPubKeyFromHexf(\"Recover: failed to get App pub key\").AddCause(err)\n\t}\n\n\tmsg := model.RecoverMsg{\n\t\tUsername: username,\n\t\tNewResetPubKey: resetPubKey,\n\t\tNewTransactionPubKey: txPubKey,\n\t\tNewAppPubKey: appPubKey,\n\t}\n\treturn broadcast.broadcastTransaction(ctx, msg, privKeyHex, seq, \"\", false)\n}", "func (oi *offsetInjector) recover(ctx context.Context, nodeID int) {\n\tif !oi.deployed {\n\t\toi.c.t.Fatal(\"Offset injector must be deployed before recovering from clock offsets\")\n\t}\n\n\tsyncCmds := [][]string{\n\t\t{\"sudo\", \"service\", \"ntp\", \"stop\"},\n\t\t{\"sudo\", \"ntpdate\", \"-u\", \"time.google.com\"},\n\t\t{\"sudo\", \"service\", \"ntp\", \"start\"},\n\t}\n\tfor _, cmd := range syncCmds {\n\t\toi.c.Run(\n\t\t\tctx,\n\t\t\toi.c.Node(nodeID),\n\t\t\tcmd...,\n\t\t)\n\t}\n}", "func (instanceApi *InstanceApi) Recover(id string) (bool, error) {\n\t_, err := instanceApi.entityService.Execute(id, INSTANCE_RECOVER_OPERATION, []byte{}, map[string]string{})\n\treturn err == nil, err\n}", "func (s *Stopper) Recover(ctx context.Context) {\n\tif r := recover(); r != nil {\n\t\tif s.onPanic != nil {\n\t\t\ts.onPanic(r)\n\t\t\treturn\n\t\t}\n\t\tif sv := settings.TODO(); sv != nil {\n\t\t\tlog.ReportPanic(ctx, sv, r, 1)\n\t\t}\n\t\tpanic(r)\n\t}\n}", "func executeCheckAndRecoverFunction(analysisEntry *inst.ReplicationAnalysis) (err error) 
{\n\tcountPendingRecoveries.Add(1)\n\tdefer countPendingRecoveries.Add(-1)\n\n\tcheckAndRecoverFunctionCode := getCheckAndRecoverFunctionCode(analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias)\n\tisActionableRecovery := hasActionableRecovery(checkAndRecoverFunctionCode)\n\tanalysisEntry.IsActionableRecovery = isActionableRecovery\n\trunEmergentOperations(analysisEntry)\n\n\tif checkAndRecoverFunctionCode == noRecoveryFunc {\n\t\t// Unhandled problem type\n\t\tif analysisEntry.Analysis != inst.NoProblem {\n\t\t\tif util.ClearToLog(\"executeCheckAndRecoverFunction\", analysisEntry.AnalyzedInstanceAlias) {\n\t\t\t\tlog.Warningf(\"executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; tablet: %+v\",\n\t\t\t\t\tanalysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\t// we have a recovery function; its execution still depends on filters if not disabled.\n\tif isActionableRecovery || util.ClearToLog(\"executeCheckAndRecoverFunction: detection\", analysisEntry.AnalyzedInstanceAlias) {\n\t\tlog.Infof(\"executeCheckAndRecoverFunction: proceeding with %+v detection on %+v; isActionable?: %+v\", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, isActionableRecovery)\n\t}\n\n\t// At this point we have validated there's a failure scenario for which we have a recovery path.\n\n\t// Initiate detection:\n\t_, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry)\n\tif err != nil {\n\t\tlog.Errorf(\"executeCheckAndRecoverFunction: error on failure detection: %+v\", err)\n\t\treturn err\n\t}\n\t// We don't mind whether detection really executed the processes or not\n\t// (it may have been silenced due to previous detection). We only care there's no error.\n\n\t// We're about to embark on recovery shortly...\n\n\t// Check for recovery being disabled globally\n\tif recoveryDisabledGlobally, err := IsRecoveryDisabled(); err != nil {\n\t\t// Unexpected. 
Shouldn't get this\n\t\tlog.Errorf(\"Unable to determine if recovery is disabled globally: %v\", err)\n\t} else if recoveryDisabledGlobally {\n\t\tlog.Infof(\"CheckAndRecover: Analysis: %+v, Tablet: %+v: NOT Recovering host (disabled globally)\",\n\t\t\tanalysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias)\n\n\t\treturn err\n\t}\n\n\t// We lock the shard here and then refresh the tablets information\n\tctx, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceAlias, getLockAction(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer unlock(&err)\n\n\t// Check if the recovery is already fixed or not. We need this because vtorc works on ephemeral data to find the failure scenarios.\n\t// That data might be old, because of a cluster operation that was run through vtctld or some other vtorc. So before we do any\n\t// changes, we should be checking that this failure is indeed needed to be fixed. We do this after locking the shard to be sure\n\t// that the data that we use now is up-to-date.\n\tif isActionableRecovery {\n\t\tlog.Errorf(\"executeCheckAndRecoverFunction: Proceeding with %v recovery on %v validation after acquiring shard lock.\", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias)\n\t\t// The first step we have to do is refresh the keyspace and shard information\n\t\t// This is required to know if the durability policies have changed or not\n\t\t// If they have, then recoveries like ReplicaSemiSyncMustNotBeSet, etc won't be valid anymore.\n\t\t// Similarly, a new primary could have been elected in the mean-time that can cause\n\t\t// a change in the recovery we run.\n\t\terr = RefreshKeyspaceAndShard(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// If we are about to run a cluster-wide recovery, it is imperative to first refresh all the tablets\n\t\t// of a shard because a new tablet could have 
been promoted, and we need to have this visibility before we\n\t\t// run a cluster operation of our own.\n\t\tif isClusterWideRecovery(checkAndRecoverFunctionCode) {\n\t\t\tvar tabletsToIgnore []string\n\t\t\tif checkAndRecoverFunctionCode == recoverDeadPrimaryFunc {\n\t\t\t\ttabletsToIgnore = append(tabletsToIgnore, analysisEntry.AnalyzedInstanceAlias)\n\t\t\t}\n\t\t\t// We ignore the dead primary tablet because it is going to be unreachable. If all the other tablets aren't able to reach this tablet either,\n\t\t\t// we can proceed with the dead primary recovery. We don't need to refresh the information for this dead tablet.\n\t\t\tforceRefreshAllTabletsInShard(ctx, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard, tabletsToIgnore)\n\t\t} else {\n\t\t\t// If we are not running a cluster-wide recovery, then it is only concerned with the specific tablet\n\t\t\t// on which the failure occurred and the primary instance of the shard.\n\t\t\t// For example, ConnectedToWrongPrimary analysis only cares for whom the current primary tablet is\n\t\t\t// and the host-port set on the tablet in question.\n\t\t\t// So, we only need to refresh the tablet info records (to know if the primary tablet has changed),\n\t\t\t// and the replication data of the new primary and this tablet.\n\t\t\trefreshTabletInfoOfShard(ctx, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard)\n\t\t\tDiscoverInstance(analysisEntry.AnalyzedInstanceAlias, true)\n\t\t\tprimaryTablet, err := shardPrimary(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"executeCheckAndRecoverFunction: Analysis: %+v, Tablet: %+v: error while finding the shard primary: %v\",\n\t\t\t\t\tanalysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tprimaryTabletAlias := topoproto.TabletAliasString(primaryTablet.Alias)\n\t\t\t// We can skip the refresh if we know the tablet we are looking at is the primary 
tablet.\n\t\t\t// This would be the case for PrimaryHasPrimary recovery. We don't need to refresh the same tablet twice.\n\t\t\tif analysisEntry.AnalyzedInstanceAlias != primaryTabletAlias {\n\t\t\t\tDiscoverInstance(primaryTabletAlias, true)\n\t\t\t}\n\t\t}\n\t\talreadyFixed, err := checkIfAlreadyFixed(analysisEntry)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"executeCheckAndRecoverFunction: Analysis: %+v, Tablet: %+v: error while trying to find if the problem is already fixed: %v\",\n\t\t\t\tanalysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, err)\n\t\t\treturn err\n\t\t}\n\t\tif alreadyFixed {\n\t\t\tlog.Infof(\"Analysis: %v on tablet %v - No longer valid, some other agent must have fixed the problem.\", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Actually attempt recovery:\n\tif isActionableRecovery || util.ClearToLog(\"executeCheckAndRecoverFunction: recovery\", analysisEntry.AnalyzedInstanceAlias) {\n\t\tlog.Infof(\"executeCheckAndRecoverFunction: proceeding with %+v recovery on %+v; isRecoverable?: %+v\", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, isActionableRecovery)\n\t}\n\trecoveryAttempted, topologyRecovery, err := getCheckAndRecoverFunction(checkAndRecoverFunctionCode)(ctx, analysisEntry)\n\tif !recoveryAttempted {\n\t\treturn err\n\t}\n\trecoveryName := getRecoverFunctionName(checkAndRecoverFunctionCode)\n\trecoveriesCounter.Add(recoveryName, 1)\n\tif err != nil {\n\t\trecoveriesFailureCounter.Add(recoveryName, 1)\n\t} else {\n\t\trecoveriesSuccessfulCounter.Add(recoveryName, 1)\n\t}\n\tif topologyRecovery == nil {\n\t\treturn err\n\t}\n\tif b, err := json.Marshal(topologyRecovery); err == nil {\n\t\tlog.Infof(\"Topology recovery: %+v\", string(b))\n\t} else {\n\t\tlog.Infof(\"Topology recovery: %+v\", topologyRecovery)\n\t}\n\t// If we ran a cluster wide recovery and actually attempted it, then we know that the replication state for all the tablets in this cluster\n\t// 
would have changed. So we can go ahead and pre-emptively refresh them.\n\t// For this refresh we don't use the same context that we used for the recovery, since that context might have expired or could expire soon\n\t// Instead we pass the background context. The call forceRefreshAllTabletsInShard handles adding a timeout to it for us.\n\tif isClusterWideRecovery(checkAndRecoverFunctionCode) {\n\t\tforceRefreshAllTabletsInShard(context.Background(), analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard, nil)\n\t} else {\n\t\t// For all other recoveries, we would have changed the replication status of the analyzed tablet\n\t\t// so it doesn't hurt to re-read the information of this tablet, otherwise we'll requeue the same recovery\n\t\t// that we just completed because we would be using stale data.\n\t\tDiscoverInstance(analysisEntry.AnalyzedInstanceAlias, true)\n\t}\n\treturn err\n}", "func (tl TaskList) ReopenTask(id int) error {\n\tfor _, t := range tl {\n\t\tif t.ID == id {\n\t\t\tif !t.Complete {\n\t\t\t\treturn fmt.Errorf(\"task with ID '%d' is already open\", id)\n\t\t\t}\n\t\t\tt.Complete = false\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"task with ID '%d' not found\", id)\n}", "func (s *Stopper) Recover(ctx context.Context) {\n\tif r := recover(); r != nil {\n\t\tif s.onPanic != nil {\n\t\t\ts.onPanic(r)\n\t\t\treturn\n\t\t}\n\t\tif sv := settings.TODO(); sv != nil {\n\t\t\tlogcrash.ReportPanic(ctx, sv, r, 1)\n\t\t}\n\t\tpanic(r)\n\t}\n}", "func RestartFailedTasks(ctx context.Context, opts RestartOptions) (RestartResults, error) {\n\tresults := RestartResults{}\n\tif !opts.IncludeTestFailed && !opts.IncludeSysFailed && !opts.IncludeSetupFailed {\n\t\topts.IncludeTestFailed = true\n\t\topts.IncludeSysFailed = true\n\t\topts.IncludeSetupFailed = true\n\t}\n\tfailureTypes := []string{}\n\tif opts.IncludeTestFailed {\n\t\tfailureTypes = append(failureTypes, evergreen.CommandTypeTest)\n\t}\n\tif opts.IncludeSysFailed {\n\t\tfailureTypes = 
append(failureTypes, evergreen.CommandTypeSystem)\n\t}\n\tif opts.IncludeSetupFailed {\n\t\tfailureTypes = append(failureTypes, evergreen.CommandTypeSetup)\n\t}\n\ttasksToRestart, err := task.FindAll(db.Query(task.ByTimeStartedAndFailed(opts.StartTime, opts.EndTime, failureTypes)))\n\tif err != nil {\n\t\treturn results, errors.WithStack(err)\n\t}\n\ttasksToRestart, err = task.AddParentDisplayTasks(tasksToRestart)\n\tif err != nil {\n\t\treturn results, errors.WithStack(err)\n\t}\n\n\ttype taskGroupAndBuild struct {\n\t\tBuild string\n\t\tTaskGroup string\n\t}\n\t// only need to check one task per task group / build combination, and once per display task\n\ttaskGroupsToCheck := map[taskGroupAndBuild]string{}\n\tdisplayTasksToCheck := map[string]task.Task{}\n\tidsToRestart := []string{}\n\tfor _, t := range tasksToRestart {\n\t\tif t.IsPartOfDisplay() {\n\t\t\tdt, err := t.GetDisplayTask()\n\t\t\tif err != nil {\n\t\t\t\treturn results, errors.Wrap(err, \"getting display task\")\n\t\t\t}\n\t\t\tdisplayTasksToCheck[t.DisplayTask.Id] = *dt\n\t\t} else if t.DisplayOnly {\n\t\t\tdisplayTasksToCheck[t.Id] = t\n\t\t} else if t.IsPartOfSingleHostTaskGroup() {\n\t\t\ttaskGroupsToCheck[taskGroupAndBuild{\n\t\t\t\tTaskGroup: t.TaskGroup,\n\t\t\t\tBuild: t.BuildId,\n\t\t\t}] = t.Id\n\t\t} else {\n\t\t\tidsToRestart = append(idsToRestart, t.Id)\n\t\t}\n\t}\n\n\tfor id, dt := range displayTasksToCheck {\n\t\tif dt.IsFinished() {\n\t\t\tidsToRestart = append(idsToRestart, id)\n\t\t} else {\n\t\t\tif err = dt.SetResetWhenFinished(); err != nil {\n\t\t\t\treturn results, errors.Wrapf(err, \"marking display task '%s' for reset\", id)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, tg := range taskGroupsToCheck {\n\t\tidsToRestart = append(idsToRestart, tg)\n\t}\n\n\t// if this is a dry run, immediately return the tasks found\n\tif opts.DryRun {\n\t\tresults.ItemsRestarted = idsToRestart\n\t\treturn results, nil\n\t}\n\n\treturn doRestartFailedTasks(ctx, idsToRestart, opts.User, results), nil\n}", 
"func (t *trial) recover() error {\n\trunID, restarts, err := t.db.TrialRunIDAndRestarts(t.id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"restoring old trial state\")\n\t}\n\tt.runID = runID\n\tt.restarts = restarts\n\treturn nil\n}", "func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.ReplicationAnalysis, recoveryName string, waitForAllTablets bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) {\n\tif !analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery {\n\t\treturn false, nil, nil\n\t}\n\n\t// Read the tablet information from the database to find the shard and keyspace of the tablet\n\ttablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias)\n\tif err != nil {\n\t\treturn false, nil, err\n\t}\n\n\ttopologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, true, true)\n\tif topologyRecovery == nil {\n\t\t_ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf(\"found an active or recent recovery on %+v. Will not issue another %v.\", analysisEntry.AnalyzedInstanceAlias, recoveryName))\n\t\treturn false, nil, err\n\t}\n\tlog.Infof(\"Analysis: %v, %v %+v\", analysisEntry.Analysis, recoveryName, analysisEntry.AnalyzedInstanceAlias)\n\tvar promotedReplica *inst.Instance\n\t// This has to be done in the end; whether successful or not, we should mark that the recovery is done.\n\t// So that after the active period passes, we are able to run other recoveries.\n\tdefer func() {\n\t\t_ = resolveRecovery(topologyRecovery, promotedReplica)\n\t}()\n\n\tev, err := reparentutil.NewEmergencyReparenter(ts, tmclient.NewTabletManagerClient(), logutil.NewCallbackLogger(func(event *logutilpb.Event) {\n\t\tlevel := event.GetLevel()\n\t\tvalue := event.GetValue()\n\t\t// we only log the warnings and errors explicitly, everything gets logged as an information message anyways in auditing topology recovery\n\t\tswitch level {\n\t\tcase logutilpb.Level_WARNING:\n\t\t\tlog.Warningf(\"ERS - %s\", value)\n\t\tcase 
logutilpb.Level_ERROR:\n\t\t\tlog.Errorf(\"ERS - %s\", value)\n\t\tdefault:\n\t\t\tlog.Infof(\"ERS - %s\", value)\n\t\t}\n\t\t_ = AuditTopologyRecovery(topologyRecovery, value)\n\t})).ReparentShard(ctx,\n\t\ttablet.Keyspace,\n\t\ttablet.Shard,\n\t\treparentutil.EmergencyReparentOptions{\n\t\t\tIgnoreReplicas: nil,\n\t\t\tWaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second,\n\t\t\tPreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover,\n\t\t\tWaitAllTablets: waitForAllTablets,\n\t\t},\n\t)\n\tif err != nil {\n\t\tlog.Errorf(\"Error running ERS - %v\", err)\n\t}\n\n\tif ev != nil && ev.NewPrimary != nil {\n\t\tpromotedReplica, _, _ = inst.ReadInstance(topoproto.TabletAliasString(ev.NewPrimary.Alias))\n\t}\n\tpostErsCompletion(topologyRecovery, analysisEntry, recoveryName, promotedReplica)\n\treturn true, topologyRecovery, err\n}", "func resetTask(ctx context.Context, settings *evergreen.Settings, taskId, username string, failedOnly bool) error {\n\tt, err := task.FindOneId(taskId)\n\tif err != nil {\n\t\treturn gimlet.ErrorResponse{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tMessage: errors.Wrapf(err, \"finding task '%s'\", t).Error(),\n\t\t}\n\t}\n\tif t == nil {\n\t\treturn gimlet.ErrorResponse{\n\t\t\tStatusCode: http.StatusNotFound,\n\t\t\tMessage: fmt.Sprintf(\"task '%s' not found\", taskId),\n\t\t}\n\t}\n\treturn errors.Wrapf(serviceModel.ResetTaskOrDisplayTask(ctx, settings, t, username, evergreen.RESTV2Package, failedOnly, nil), \"resetting task '%s'\", taskId)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
BuildContainerName returns the podman container name for a given TaskConfig
func BuildContainerName(cfg *drivers.TaskConfig) string { return fmt.Sprintf("%s-%s", cfg.Name, cfg.AllocID) }
[ "func constructContainerName(containerType string, app platform.App) (string, error) {\n\tcontainer, err := app.FindContainerByType(containerType)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tname := dockerutil.ContainerName(container)\n\treturn name, nil\n}", "func containerName(ctx *context.ExecuteContext, name string) string {\n\treturn fmt.Sprintf(\"%s-%s\", ctx.Env.Unique(), name)\n}", "func (e *dockerExec) containerName() string {\n\tpathHex := reflow.Digester.FromString(e.path()).Short()\n\treturn fmt.Sprintf(\"reflow-%s-%s-%s\", e.Executor.ID, e.id.Hex(), pathHex)\n}", "func MakeContainerName(appname, entrypoint, ident string) string {\n\treturn strings.Join([]string{appname, entrypoint, ident}, \"_\")\n}", "func makeContainerName(c *runtime.ContainerMetadata, s *runtime.PodSandboxMetadata) string {\n\treturn strings.Join([]string{\n\t\tc.Name, // 0\n\t\ts.Name, // 1: sandbox name\n\t\ts.Namespace, // 2: sandbox namespace\n\t\ts.Uid, // 3: sandbox uid\n\t\tfmt.Sprintf(\"%d\", c.Attempt), // 4\n\t}, nameDelimiter)\n}", "func (s Service) ContainerName() string {\n\treturn fmt.Sprintf(\"coz_neo-local_%s\", s.Image)\n}", "func (n Nightly) GetContainerName() string {\n\treturn \"lpn-\" + n.GetType()\n}", "func (n *natsDependency) GetContainerName() string {\n\treturn n.config.Name + n.config.Version\n}", "func (t *SelectorParameter) GetContainerName() string {\n\tif t.CmdParameter.ContainerName != \"\" {\n\t\treturn t.CmdParameter.ContainerName\n\t}\n\tif t.ConfigParameter.ContainerName != \"\" {\n\t\treturn t.ConfigParameter.ContainerName\n\t}\n\n\treturn \"\"\n}", "func (s Service) ContainerName() string {\n\treturn fmt.Sprintf(\"%s_%s\", ProjectName, strings.ReplaceAll(strings.ToLower(s.Name), \" \", \"_\"))\n}", "func (o IoTHubFileUploadOutput) ContainerName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IoTHubFileUpload) string { return v.ContainerName }).(pulumi.StringOutput)\n}", "func (e *localExocomDependency) GetContainerName() string 
{\n\treturn e.config.Name + e.config.Version\n}", "func (cni *ContrailCni) buildContainerIntfName(\n index int, isMetaPlugin bool) string {\n var intfName string\n if isMetaPlugin == true {\n intfName = cni.cniArgs.IfName\n } else {\n intfName = VIF_TYPE_ETH + strconv.Itoa(index)\n }\n log.Infof(\"Built container interface name - %s\", intfName)\n return intfName\n}", "func MakePodName(jobName string, taskName string, index int) string {\n\treturn fmt.Sprintf(jobhelpers.PodNameFmt, jobName, taskName, index)\n}", "func containerNameFromSpec(debugger string) string {\n\tcontainerVariant := \"gdb\"\n\tif debugger == \"dlv\" {\n\t\tcontainerVariant = \"dlv\"\n\t}\n\treturn fmt.Sprintf(\"%v-%v\", sqOpts.ParticularContainerRootName, containerVariant)\n}", "func MesosTaskIDToMarathonServiceIDContainerName(taskID string) (string, string) {\n\tparts := strings.Split(taskID, \".\")\n\tserviceID := \"\"\n\tcontainerName := \"\"\n\tif len(parts) < 2 {\n\t\treturn serviceID, containerName\n\t}\n\tif len(parts) == 3 {\n\t\tcontainerName = parts[2]\n\t}\n\tserviceIDParts := strings.Split(parts[0], \"_\")\n\tserviceID = \"/\" + strings.Join(serviceIDParts, \"/\")\n\treturn serviceID, containerName\n}", "func (c *ContainerConfigMapSpec) GetName() string {\n\tsuffix := fmt.Sprintf(\"%s-%s\", c.ContainerName, c.Subdir)\n\treturn util.PrefixConfigmap(c.qserv, suffix)\n}", "func getRenderableContainerName(nmd report.Node) string {\n\tfor _, key := range []string{\n\t\t// Amazon's ecs-agent produces huge Docker container names, destructively\n\t\t// derived from mangling Container Definition names in Task\n\t\t// Definitions.\n\t\t//\n\t\t// However, the ecs-agent provides a label containing the original Container\n\t\t// Definition name.\n\t\tdocker.LabelPrefix + AmazonECSContainerNameLabel,\n\t\t// Kubernetes also mangles its Docker container names and provides a\n\t\t// label with the original container name. 
However, note that this label\n\t\t// is only provided by Kubernetes versions >= 1.2 (see\n\t\t// https://github.com/kubernetes/kubernetes/pull/17234/ )\n\t\tdocker.LabelPrefix + KubernetesContainerNameLabel,\n\t\t// Marathon doesn't set any Docker labels and this is the only meaningful\n\t\t// attribute we can find to make Scope useful without Mesos plugin\n\t\tdocker.EnvPrefix + MarathonAppIDEnv,\n\t\tdocker.ContainerName,\n\t\tdocker.ContainerHostname,\n\t} {\n\t\tif label, ok := nmd.Latest.Lookup(key); ok {\n\t\t\treturn label\n\t\t}\n\t}\n\treturn \"\"\n}", "func (c Commerce) GetContainerName() string {\n\treturn \"lpn-\" + c.GetType()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StartTask creates and starts a new Container based on the given TaskConfig.
func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) { if _, ok := d.tasks.Get(cfg.ID); ok { return nil, nil, fmt.Errorf("task with ID %q already started", cfg.ID) } var driverConfig TaskConfig if err := cfg.DecodeDriverConfig(&driverConfig); err != nil { return nil, nil, fmt.Errorf("failed to decode driver config: %v", err) } handle := drivers.NewTaskHandle(taskHandleVersion) handle.Config = cfg if driverConfig.Image == "" { return nil, nil, fmt.Errorf("image name required") } createOpts := api.SpecGenerator{} createOpts.ContainerBasicConfig.LogConfiguration = &api.LogConfig{} allArgs := []string{} if driverConfig.Command != "" { allArgs = append(allArgs, driverConfig.Command) } allArgs = append(allArgs, driverConfig.Args...) if driverConfig.Entrypoint != "" { createOpts.ContainerBasicConfig.Entrypoint = append(createOpts.ContainerBasicConfig.Entrypoint, driverConfig.Entrypoint) } containerName := BuildContainerName(cfg) // ensure to include port_map into tasks environment map cfg.Env = taskenv.SetPortMapEnvs(cfg.Env, driverConfig.PortMap) // Basic config options createOpts.ContainerBasicConfig.Name = containerName createOpts.ContainerBasicConfig.Command = allArgs createOpts.ContainerBasicConfig.Env = cfg.Env createOpts.ContainerBasicConfig.Hostname = driverConfig.Hostname createOpts.ContainerBasicConfig.Sysctl = driverConfig.Sysctl createOpts.ContainerBasicConfig.LogConfiguration.Path = cfg.StdoutPath // Storage config options createOpts.ContainerStorageConfig.Init = driverConfig.Init createOpts.ContainerStorageConfig.Image = driverConfig.Image createOpts.ContainerStorageConfig.InitPath = driverConfig.InitPath createOpts.ContainerStorageConfig.WorkDir = driverConfig.WorkingDir allMounts, err := d.containerMounts(cfg, &driverConfig) if err != nil { return nil, nil, err } createOpts.ContainerStorageConfig.Mounts = allMounts // Resources config options createOpts.ContainerResourceConfig.ResourceLimits = 
&spec.LinuxResources{ Memory: &spec.LinuxMemory{}, CPU: &spec.LinuxCPU{}, } if driverConfig.MemoryReservation != "" { reservation, err := memoryInBytes(driverConfig.MemoryReservation) if err != nil { return nil, nil, err } createOpts.ContainerResourceConfig.ResourceLimits.Memory.Reservation = &reservation } if cfg.Resources.NomadResources.Memory.MemoryMB > 0 { limit := cfg.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024 createOpts.ContainerResourceConfig.ResourceLimits.Memory.Limit = &limit } if driverConfig.MemorySwap != "" { swap, err := memoryInBytes(driverConfig.MemorySwap) if err != nil { return nil, nil, err } createOpts.ContainerResourceConfig.ResourceLimits.Memory.Swap = &swap } if !d.cgroupV2 { swappiness := uint64(driverConfig.MemorySwappiness) createOpts.ContainerResourceConfig.ResourceLimits.Memory.Swappiness = &swappiness } // FIXME: can fail for nonRoot due to missing cpu limit delegation permissions, // see https://github.com/containers/podman/blob/master/troubleshooting.md if !d.systemInfo.Host.Rootless { cpuShares := uint64(cfg.Resources.LinuxResources.CPUShares) createOpts.ContainerResourceConfig.ResourceLimits.CPU.Shares = &cpuShares } // Security config options createOpts.ContainerSecurityConfig.CapAdd = driverConfig.CapAdd createOpts.ContainerSecurityConfig.CapDrop = driverConfig.CapDrop createOpts.ContainerSecurityConfig.User = cfg.User // Network config options for _, strdns := range driverConfig.Dns { ipdns := net.ParseIP(strdns) if ipdns == nil { return nil, nil, fmt.Errorf("Invald dns server address") } createOpts.ContainerNetworkConfig.DNSServers = append(createOpts.ContainerNetworkConfig.DNSServers, ipdns) } // Configure network if cfg.NetworkIsolation != nil && cfg.NetworkIsolation.Path != "" { createOpts.ContainerNetworkConfig.NetNS.NSMode = api.Path createOpts.ContainerNetworkConfig.NetNS.Value = cfg.NetworkIsolation.Path } else { if driverConfig.NetworkMode == "" { createOpts.ContainerNetworkConfig.NetNS.NSMode = api.Bridge } 
else if driverConfig.NetworkMode == "bridge" { createOpts.ContainerNetworkConfig.NetNS.NSMode = api.Bridge } else if driverConfig.NetworkMode == "host" { createOpts.ContainerNetworkConfig.NetNS.NSMode = api.Host } else if driverConfig.NetworkMode == "none" { createOpts.ContainerNetworkConfig.NetNS.NSMode = api.NoNetwork } else if driverConfig.NetworkMode == "slirp4netns" { createOpts.ContainerNetworkConfig.NetNS.NSMode = api.Slirp } else { return nil, nil, fmt.Errorf("Unknown/Unsupported network mode: %s", driverConfig.NetworkMode) } } portMappings, err := d.portMappings(cfg, driverConfig) if err != nil { return nil, nil, err } createOpts.ContainerNetworkConfig.PortMappings = portMappings containerID := "" recoverRunningContainer := false // check if there is a container with same name otherContainerInspect, err := d.podman.ContainerInspect(d.ctx, containerName) if err == nil { // ok, seems we found a container with similar name if otherContainerInspect.State.Running { // it's still running. So let's use it instead of creating a new one d.logger.Info("Detect running container with same name, we reuse it", "task", cfg.ID, "container", otherContainerInspect.ID) containerID = otherContainerInspect.ID recoverRunningContainer = true } else { // let's remove the old, dead container d.logger.Info("Detect stopped container with same name, removing it", "task", cfg.ID, "container", otherContainerInspect.ID) if err = d.podman.ContainerDelete(d.ctx, otherContainerInspect.ID, true, true); err != nil { return nil, nil, nstructs.WrapRecoverable(fmt.Sprintf("failed to remove dead container: %v", err), err) } } } if !recoverRunningContainer { // FIXME: there are more variations of image sources, we should handle it // e.g. oci-archive:/... etc // see also https://github.com/hashicorp/nomad-driver-podman/issues/69 // do we already have this image in local storage? 
haveImage, err := d.podman.ImageExists(d.ctx, createOpts.Image) if err != nil { return nil, nil, fmt.Errorf("failed to start task, unable to check for local image: %v", err) } if !haveImage { // image is not in local storage, so we need to pull it if err = d.podman.ImagePull(d.ctx, createOpts.Image); err != nil { return nil, nil, fmt.Errorf("failed to start task, unable to pull image %s: %v", createOpts.Image, err) } } createResponse, err := d.podman.ContainerCreate(d.ctx, createOpts) for _, w := range createResponse.Warnings { d.logger.Warn("Create Warning", "warning", w) } if err != nil { return nil, nil, fmt.Errorf("failed to start task, could not create container: %v", err) } containerID = createResponse.Id } cleanup := func() { d.logger.Debug("Cleaning up", "container", containerID) if err := d.podman.ContainerDelete(d.ctx, containerID, true, true); err != nil { d.logger.Error("failed to clean up from an error in Start", "error", err) } } if !recoverRunningContainer { if err = d.podman.ContainerStart(d.ctx, containerID); err != nil { cleanup() return nil, nil, fmt.Errorf("failed to start task, could not start container: %v", err) } } inspectData, err := d.podman.ContainerInspect(d.ctx, containerID) if err != nil { d.logger.Error("failed to inspect container", "err", err) cleanup() return nil, nil, fmt.Errorf("failed to start task, could not inspect container : %v", err) } net := &drivers.DriverNetwork{ PortMap: driverConfig.PortMap, IP: inspectData.NetworkSettings.IPAddress, AutoAdvertise: true, } h := &TaskHandle{ containerID: containerID, driver: d, taskConfig: cfg, procState: drivers.TaskStateRunning, exitResult: &drivers.ExitResult{}, startedAt: time.Now().Round(time.Millisecond), logger: d.logger.Named("podmanHandle"), totalCPUStats: stats.NewCpuStats(), userCPUStats: stats.NewCpuStats(), systemCPUStats: stats.NewCpuStats(), removeContainerOnExit: d.config.GC.Container, } driverState := TaskState{ ContainerID: containerID, TaskConfig: cfg, StartedAt: 
h.startedAt, Net: net, } if err := handle.SetDriverState(&driverState); err != nil { d.logger.Error("failed to start task, error setting driver state", "error", err) cleanup() return nil, nil, fmt.Errorf("failed to set driver state: %v", err) } d.tasks.Set(cfg.ID, h) go h.runContainerMonitor() d.logger.Info("Completely started container", "taskID", cfg.ID, "container", containerID, "ip", inspectData.NetworkSettings.IPAddress) return handle, net, nil }
[ "func (c *DockerScheduler) startTask(task *demand.Task) {\n\tvar labels = map[string]string{\n\t\tlabelMap: task.Name,\n\t}\n\n\tvar cmds = strings.Fields(task.Command)\n\n\tcreateOpts := docker.CreateContainerOptions{\n\t\tConfig: &docker.Config{\n\t\t\tImage: task.Image,\n\t\t\tCmd: cmds,\n\t\t\tAttachStdout: true,\n\t\t\tAttachStdin: true,\n\t\t\tLabels: labels,\n\t\t\tEnv: task.Env,\n\t\t},\n\t\tHostConfig: &docker.HostConfig{\n\t\t\tPublishAllPorts: task.PublishAllPorts,\n\t\t\tNetworkMode: task.NetworkMode,\n\t\t},\n\t}\n\n\tgo func() {\n\t\tscaling.Add(1)\n\t\tdefer scaling.Done()\n\n\t\tlog.Debugf(\"[start] task %s\", task.Name)\n\t\tcontainer, err := c.client.CreateContainer(createOpts)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Couldn't create container for task %s: %v\", task.Name, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar containerID = container.ID[:12]\n\n\t\tc.Lock()\n\t\tc.taskContainers[task.Name][containerID] = &dockerContainer{\n\t\t\tstate: \"created\",\n\t\t}\n\t\tc.Unlock()\n\t\tlog.Debugf(\"[created] task %s ID %s\", task.Name, containerID)\n\n\t\t// Start it but passing nil for the HostConfig as this option was removed in Docker 1.12.\n\t\terr = c.client.StartContainer(containerID, nil)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Couldn't start container ID %s for task %s: %v\", containerID, task.Name, err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debugf(\"[starting] task %s ID %s\", task.Name, containerID)\n\n\t\tc.Lock()\n\t\tc.taskContainers[task.Name][containerID].state = \"starting\"\n\t\tc.Unlock()\n\t}()\n}", "func (e *ECS) StartTask(req *StartTaskReq) (*StartTaskResp, error) {\n\tif req == nil {\n\t\treturn nil, fmt.Errorf(\"The req params cannot be nil\")\n\t}\n\n\tparams := makeParams(\"StartTask\")\n\tif req.Cluster != \"\" {\n\t\tparams[\"cluster\"] = req.Cluster\n\t}\n\tif req.TaskDefinition != \"\" {\n\t\tparams[\"taskDefinition\"] = req.TaskDefinition\n\t}\n\tfor i, ci := range req.ContainerInstances 
{\n\t\tparams[fmt.Sprintf(\"containerInstances.member.%d\", i+1)] = ci\n\t}\n\tfor i, co := range req.Overrides.ContainerOverrides {\n\t\tkey := fmt.Sprintf(\"overrides.containerOverrides.member.%d\", i+1)\n\t\tparams[fmt.Sprintf(\"%s.name\", key)] = co.Name\n\t\tfor k, cmd := range co.Command {\n\t\t\tparams[fmt.Sprintf(\"%s.command.member.%d\", key, k+1)] = cmd\n\t\t}\n\t}\n\n\tresp := new(StartTaskResp)\n\tif err := e.query(params, resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func StartTask(ctx context.Context, id string, op Operation, driver Driver, opts Options) *Task {\n\tt := newTask(id, op)\n\tif t == nil {\n\t\treturn nil\n\t}\n\tt.start = time.Now()\n\tgo t.run(ctx, driver, opts)\n\treturn t\n}", "func (lenc *Lencak) StartTask(workSpaceName, taskName string, asService bool) bool {\n\treturn lenc.WithWorkspaceTask(workSpaceName, taskName, func(task *Task) {\n\t\tif asService {\n\t\t\ttask.serviceMu.Lock()\n\t\t\ttask.Service = true\n\t\t\ttask.serviceMu.Unlock()\n\t\t\tif task.ActiveTask == nil {\n\t\t\t\ttask.Start(lenc.sync)\n\t\t\t}\n\t\t} else {\n\t\t\ttask.Start(lenc.sync)\n\t\t}\n\t})\n}", "func (c *client) StartTask(ctx context.Context, data StartTaskRequest) error {\n\turl := c.createURLs(\"/_api/task\", nil)\n\n\treq, err := c.newRequests(\"POST\", url, data)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\tif err := c.do(ctx, req, nil); err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\treturn nil\n}", "func (c Control) ServeStartTask(w http.ResponseWriter, r *http.Request) {\n\tc.ServeTaskAction(w, r, true)\n}", "func (i *TaskRegisterUpdater) StartTask(ctx context.Context, action string, age time.Duration) (models.Task, error) {\n\n\treturn i.repository.GetTask(ctx, action, age)\n}", "func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) {\n\tif _, ok := d.tasks.Get(cfg.ID); ok {\n\t\treturn nil, nil, fmt.Errorf(\"task with ID %q already started\", 
cfg.ID)\n\t}\n\n\tvar driverConfig api.TaskConfig\n\n\tif err := cfg.DecodeDriverConfig(&driverConfig); err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to decode task config: %v\", err)\n\t}\n\n\t// make all relevant strings lower case before processing\n\tdriverConfig.ToLower()\n\td.logger.Info(\"starting task\", \"driver_cfg\", hclog.Fmt(\"%+v\", driverConfig))\n\n\thandle := drivers.NewTaskHandle(taskHandleVersion)\n\thandle.Config = cfg\n\n\t// TODO: implement driver specific mechanism to start the task.\n\t//\n\t// Once the task is started you will need to store any relevant runtime\n\t// information in a taskHandle and TaskState. The taskHandle will be\n\t// stored in-memory in the plugin and will be used to interact with the\n\t// task.\n\t//\n\t// The TaskState will be returned to the Nomad client inside a\n\t// drivers.TaskHandle instance. This TaskHandle will be sent back to plugin\n\t// if the task ever needs to be recovered, so the TaskState should contain\n\t// enough information to handle that.\n\n\t// define and start domain\n\tdomainSpec, err := d.domainManager.SyncVM(cfg, &driverConfig)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Detect domain address\n\t// stop and undefine domain if can't get ipv4 address\n\tguestIf, err := d.domainManager.DomainIfAddr(domainSpec.Name, true)\n\tif err != nil {\n\t\td.logger.Error(\"error getting domain address waiting for ipv4 addr\", \"error\", err)\n\t\td.domainManager.KillVM(domainSpec.Name)\n\t\td.domainManager.DestroyVM(domainSpec.Name)\n\t\treturn nil, nil, err\n\t}\n\n\t// default value for net, works for the following two cases:\n\t// 1. the domain has only lo interface\n\t// 2. 
or the domain has a non-lo interface but has no ip address assigned\n\tdrvNet := &drivers.DriverNetwork{}\n\n\tif guestIf != nil {\n\t\tfor _, ip := range guestIf.IPs {\n\t\t\td.logger.Debug(\"domain interface from guest agent\", \"ip\", ip.IP, \"type\", ip.Type, \"prefix\", ip.Prefix)\n\t\t\tif ip.Type == \"ipv4\" {\n\t\t\t\tdrvNet.IP = ip.IP\n\t\t\t\tif len(driverConfig.Interfaces) > 0 && driverConfig.Interfaces[0].InterfaceBindingMethod != \"network\" {\n\t\t\t\t\tdrvNet.AutoAdvertise = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Return a driver handle\n\th := &taskHandle{\n\t\tdomainManager: d.domainManager,\n\t\tresultChan: make(chan *drivers.ExitResult),\n\t\ttask: cfg, //contains taskid allocid for future use\n\t\tstartedAt: time.Now().Round(time.Millisecond),\n\t\tnet: drvNet,\n\t\tresourceUsage: &cstructs.TaskResourceUsage{\n\t\t\tResourceUsage: &cstructs.ResourceUsage{\n\t\t\t\tMemoryStats: &cstructs.MemoryStats{},\n\t\t\t\tCpuStats: &cstructs.CpuStats{},\n\t\t\t},\n\t\t}, // initial empty usage data, so that we won't return nil in stats channel\n\t}\n\n\t//config := &plugin.ClientConfig{\n\t//\tHandshakeConfig: base.Handshake,\n\t//\tPlugins: map[string]plugin.Plugin{\"executor\": p},\n\t//\tCmd: exec.Command(bin, \"executor\", string(c)),\n\t//\tAllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},\n\t//\tLogger: logger.Named(\"executor\"),\n\t//}\n\t//\n\t//if driverConfig != nil {\n\t//\tconfig.MaxPort = driverConfig.ClientMaxPort\n\t//\tconfig.MinPort = driverConfig.ClientMinPort\n\t//} else {\n\t//\tconfig.MaxPort = ExecutorDefaultMaxPort\n\t//\tconfig.MinPort = ExecutorDefaultMinPort\n\t//}\n\n\tif err := handle.SetDriverState(h.buildState(cfg)); err != nil {\n\t\td.logger.Error(\"error persisting handle state\")\n\t\treturn nil, nil, err\n\t}\n\td.tasks.Set(cfg.ID, h)\n\n\td.logger.Debug(\"returning from starttask\")\n\treturn handle, drvNet, nil\n}", "func (h *Hub) StartTask(ctx context.Context, request *pb.HubStartTaskRequest) 
(*pb.HubStartTaskReply, error) {\n\tlog.G(h.ctx).Info(\"handling StartTask request\", zap.Any(\"req\", request))\n\n\ttaskID := uuid.New()\n\tminer, err := h.selectMiner(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar startRequest = &pb.MinerStartRequest{\n\t\tId: taskID,\n\t\tRegistry: request.Registry,\n\t\tImage: request.Image,\n\t\tAuth: request.Auth,\n\t\tPublicKeyData: request.PublicKeyData,\n\t\tCommitOnStop: request.CommitOnStop,\n\t\tEnv: request.Env,\n\t\tUsage: request.Requirements.GetResources(),\n\t\tRestartPolicy: &pb.ContainerRestartPolicy{\n\t\t\tName: \"\",\n\t\t\tMaximumRetryCount: 0,\n\t\t},\n\t}\n\n\tresp, err := miner.Client.Start(ctx, startRequest)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"failed to start %v\", err)\n\t}\n\n\troutes := []extRoute{}\n\tfor k, v := range resp.Ports {\n\t\t_, protocol, err := decodePortBinding(k)\n\t\tif err != nil {\n\t\t\tlog.G(h.ctx).Warn(\"failed to decode miner's port mapping\",\n\t\t\t\tzap.String(\"mapping\", k),\n\t\t\t\tzap.Error(err),\n\t\t\t)\n\t\t\tcontinue\n\t\t}\n\n\t\trealPort, err := strconv.ParseUint(v.Port, 10, 16)\n\t\tif err != nil {\n\t\t\tlog.G(h.ctx).Warn(\"failed to convert real port to uint16\",\n\t\t\t\tzap.Error(err),\n\t\t\t\tzap.String(\"port\", v.Port),\n\t\t\t)\n\t\t\tcontinue\n\t\t}\n\n\t\troute, err := miner.router.RegisterRoute(taskID, protocol, v.IP, uint16(realPort))\n\t\tif err != nil {\n\t\t\tlog.G(h.ctx).Warn(\"failed to register route\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\troutes = append(routes, extRoute{\n\t\t\tcontainerPort: k,\n\t\t\troute: route,\n\t\t})\n\t}\n\n\th.setMinerTaskID(miner.ID(), taskID)\n\n\tresources := request.GetRequirements().GetResources()\n\tcpuCount := resources.GetCPUCores()\n\tmemoryCount := resources.GetMaxMemory()\n\n\tvar usage = resource.NewResources(int(cpuCount), int64(memoryCount))\n\tif err := miner.Consume(taskID, &usage); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reply = 
pb.HubStartTaskReply{\n\t\tId: taskID,\n\t}\n\n\tfor _, route := range routes {\n\t\treply.Endpoint = append(\n\t\t\treply.Endpoint,\n\t\t\tfmt.Sprintf(\"%s->%s:%d\", route.containerPort, route.route.Host, route.route.Port),\n\t\t)\n\t}\n\n\treturn &reply, nil\n}", "func StartTaskService(brain *brain.Manager, errChan chan error) {\n\tlis, err := net.Listen(\"tcp\", taskServicePort)\n\tif err != nil {\n\t\terrChan <- err\n\t\treturn\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\n\tRegisterTaskServiceServer(grpcServer, TaskService{Manager: brain})\n\n\tlog.LogInfo(\"starting taask-server task service on :3688\")\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\terrChan <- err\n\t}\n}", "func StartDMTask(fw portforward.PortForward, ns, masterSvcName, taskConf, errSubStr string) error {\n\tapiPath := \"/apis/v1alpha1/tasks\"\n\n\ttype Req struct {\n\t\tTask string `json:\"task\"`\n\t}\n\ttype Resp struct {\n\t\tResult bool `json:\"result\"`\n\t\tMsg string `json:\"msg\"`\n\t\tCheckResult string `json:\"checkResult\"`\n\t}\n\n\tvar req = Req{\n\t\tTask: fmt.Sprintf(taskConf, DMTaskName(ns), v1alpha1.DefaultTiDBServerPort, DMTaskName(ns)),\n\t}\n\tdata, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal task start request, %v, %v\", req, err)\n\t}\n\n\treturn wait.Poll(5*time.Second, time.Minute, func() (bool, error) {\n\t\tlocalHost, localPort, cancel, err := portforward.ForwardOnePort(\n\t\t\tfw, ns, fmt.Sprintf(\"svc/%s\", masterSvcName), dmMasterSvcPort)\n\t\tif err != nil {\n\t\t\tlog.Logf(\"failed to forward dm-master svc: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tdefer cancel()\n\n\t\tbody, err := httputil.DoBodyOK(\n\t\t\t&http.Client{Transport: &http.Transport{}},\n\t\t\tfmt.Sprintf(\"http://%s:%d%s\", localHost, localPort, apiPath),\n\t\t\t\"POST\",\n\t\t\tbytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tlog.Logf(\"failed to start DM task: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tvar resp Resp\n\t\tif err = 
json.Unmarshal(body, &resp); err != nil {\n\t\t\tlog.Logf(\"failed to unmarshal DM task start response, %s: %v\", string(body), err)\n\t\t\treturn false, nil\n\t\t} else if !resp.Result && !strings.Contains(resp.Msg, \"already exists\") {\n\t\t\tif errSubStr != \"\" && strings.Contains(resp.Msg, errSubStr) {\n\t\t\t\tlog.Logf(\"start DM task match the error sub string %q: %s\", errSubStr, resp.Msg)\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tlog.Logf(\"failed to start DM task, msg: %s, err: %v, checkResult: %s\", resp.Msg, err, resp.CheckResult)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n}", "func (rm *ResponseManager) StartTask(task *peertask.Task, responseTaskDataChan chan<- ResponseTaskData) {\n\trm.send(&startTaskRequest{task, responseTaskDataChan}, nil)\n}", "func (w *Worker) StartSubTask(cfg *config.SubTaskConfig) (int64, error) {\n\tw.Lock()\n\tdefer w.Unlock()\n\n\t// copy some config item from dm-worker's config\n\tw.copyConfigFromWorker(cfg)\n\tcfgStr, err := cfg.Toml()\n\tif err != nil {\n\t\treturn 0, terror.Annotatef(err, \"encode subtask %+v into toml format\", cfg)\n\t}\n\n\topLogID, err := w.operateSubTask(&pb.TaskMeta{\n\t\tOp: pb.TaskOp_Start,\n\t\tName: cfg.Name,\n\t\tTask: append([]byte{}, cfgStr...),\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn opLogID, nil\n}", "func CtrStartContainer(domainName string) (int, error) {\n\tif err := verifyCtr(); err != nil {\n\t\treturn 0, fmt.Errorf(\"CtrStartContainer: exception while verifying ctrd client: %s\", err.Error())\n\t}\n\tctr, err := CtrLoadContainer(domainName)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tlogger := GetLog()\n\n\tio := func(id string) (cio.IO, error) {\n\t\tstdoutFile := logger.Path(domainName + \".out\")\n\t\tstderrFile := logger.Path(domainName)\n\t\treturn &logio{\n\t\t\tcio.Config{\n\t\t\t\tStdin: \"/dev/null\",\n\t\t\t\tStdout: stdoutFile,\n\t\t\t\tStderr: stderrFile,\n\t\t\t\tTerminal: false,\n\t\t\t},\n\t\t}, nil\n\t}\n\ttask, err := 
ctr.NewTask(ctrdCtx, io)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif err := prepareProcess(int(task.Pid()), nil); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif err := task.Start(ctrdCtx); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn int(task.Pid()), nil\n}", "func (c *Client) StartBkOpsTask(url string, paras *TaskPathParas,\n\trequest *StartTaskRequest) (*StartTaskResponse, error) {\n\tif c == nil {\n\t\treturn nil, ErrServerNotInit\n\t}\n\n\tvar (\n\t\treqURL = fmt.Sprintf(\"/start_task/%s/%s/\", paras.TaskID, paras.BkBizID)\n\t\trespData = &StartTaskResponse{}\n\t)\n\n\trequest.Scope = string(CmdbBizScope)\n\tuserAuth, err := c.generateGateWayAuth(paras.Operator)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"bksops StartBkOpsTask generateGateWayAuth failed: %v\", err)\n\t}\n\n\t_, _, errs := gorequest.New().\n\t\tTimeout(defaultTimeOut).\n\t\tPost(c.server+reqURL).\n\t\tSet(\"Content-Type\", \"application/json\").\n\t\tSet(\"Accept\", \"application/json\").\n\t\tSet(\"X-Bkapi-Authorization\", userAuth).\n\t\tSetDebug(c.serverDebug).\n\t\tSend(request).\n\t\tEndStruct(&respData)\n\tif len(errs) > 0 {\n\t\tblog.Errorf(\"call api StartBkOpsTask failed: %v\", errs[0])\n\t\treturn nil, errs[0]\n\t}\n\n\tif !respData.Result {\n\t\tblog.Errorf(\"call api StartBkOpsTask failed: %v\", respData.Message)\n\t\treturn nil, fmt.Errorf(respData.Message)\n\t}\n\n\t//successfully request\n\tblog.Infof(\"call api StartBkOpsTask with url(%s) successfully\", reqURL)\n\treturn respData, nil\n}", "func Start(ctx context.Context, fun TaskFunc) Task {\n\treturn (&taskGroup{ctx: ctx}).Task(fun)\n}", "func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *aws.Request, output *StartTaskOutput) {\n\toprw.Lock()\n\tdefer oprw.Unlock()\n\n\tif opStartTask == nil {\n\t\topStartTask = &aws.Operation{\n\t\t\tName: \"StartTask\",\n\t\t\tHTTPMethod: \"POST\",\n\t\t\tHTTPPath: \"/\",\n\t\t}\n\t}\n\n\treq = c.newRequest(opStartTask, input, output)\n\toutput = 
&StartTaskOutput{}\n\treq.Data = output\n\treturn\n}", "func InitiateRakeTask(taskName string, settings *models.Settings) {\n\trakeTask := map[string]string{}\n\tb, err := json.Marshal(rakeTask)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\tencodedTaskName, err := url.Parse(taskName)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\thttpclient.Post(b, fmt.Sprintf(\"%s/v1/environments/%s/services/%s/rake/%s\", settings.PaasHost, settings.EnvironmentID, settings.ServiceID, encodedTaskName), true, settings)\n}", "func runTask(ctx context.Context, cfg *taskConfig, f TaskFunc, args ...interface{}) *Task {\n\ttask := newTask(ctx, cfg, f, args...)\n\ttask.Start()\n\treturn task\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WaitTask function is expected to return a channel that will send an ExitResult when the task exits or close the channel when the context is canceled. It is also expected that calling WaitTask on an exited task will immediately send an ExitResult on the returned channel. A call to WaitTask after StopTask is valid and should be handled. If WaitTask is called after DestroyTask, it should return drivers.ErrTaskNotFound as no task state should exist after DestroyTask is called.
func (d *Driver) WaitTask(ctx context.Context, taskID string) (<-chan *drivers.ExitResult, error) { d.logger.Debug("WaitTask called", "task", taskID) handle, ok := d.tasks.Get(taskID) if !ok { return nil, drivers.ErrTaskNotFound } ch := make(chan *drivers.ExitResult) go handle.runExitWatcher(ctx, ch) return ch, nil }
[ "func (d *Driver) WaitTask(ctx context.Context, taskID string) (<-chan *drivers.ExitResult, error) {\n\td.logger.Debug(\"waittaks called\")\n\th, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn nil, drivers.ErrTaskNotFound\n\t}\n\n\td.logger.Debug(\"wait task returning\")\n\treturn h.resultChan, nil\n}", "func (t *Task) Wait(timeout time.Duration) (TaskResult, error) {\n\tselect {\n\tcase <-t.startedChan:\n\tdefault:\n\t\treturn nil, ErrNotExecuting\n\t}\n\n\tt.resultLock.RLock()\n\tif t.waitResult != nil {\n\t\tt.resultLock.RUnlock()\n\t\treturn t.waitResult, nil\n\t}\n\tt.resultLock.RUnlock()\n\n\tvar timeoutChan <-chan time.Time = make(chan time.Time)\n\tif timeout > 0 {\n\t\ttimeoutChan = t.cfg.clock.After(timeout)\n\t}\n\n\tselect {\n\tcase res := <-t.resultChan:\n\t\tt.completed(res)\n\t\tt.SetRunning(false)\n\t\treturn res, nil\n\tcase <-timeoutChan:\n\t\treturn nil, ErrTimeout\n\t}\n}", "func WaitForTask(clientV3 nutanixclientv3.Service, taskUUID string) error {\n\tfinished := false\n\tvar err error\n\tfor start := time.Now(); time.Since(start) < timeout; {\n\t\tfinished, err = isTaskFinished(clientV3, taskUUID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif finished {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(sleepTime)\n\t}\n\tif !finished {\n\t\treturn errors.Errorf(\"timeout while waiting for task UUID: %s\", taskUUID)\n\t}\n\n\treturn nil\n}", "func (tm *TaskManager) WaitAndGetTask() (int, interface{}) {\n\tfor {\n\t\tfor tm.totalNumberOfTasks == 0 {\n\t\t\t// TODO: find a better way\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\n\t\ttm.mutex.Lock()\n\t\ttask := tm.tasks[tm.currentTaskIndex]\n\t\ttm.mutex.Unlock()\n\t\t// if task is there and we're not out of allowed tasks per cycle\n\t\tif len(task.Channel) != 0 && task.Counter < task.Importance {\n\t\t\t// TODO: consider using mutex\n\t\t\ttm.mutex.Lock()\n\t\t\ttm.totalNumberOfTasks--\n\t\t\ttm.mutex.Unlock()\n\t\t\ttask.Counter++\n\t\t\treturn task.ID, <-task.Channel\n\t\t}\n\t\ttask.Counter = 
0\n\n\t\ttm.mutex.Lock()\n\t\ttm.currentTaskIndex++\n\t\tif tm.currentTaskIndex >= uint(len(tm.tasks)) {\n\t\t\ttm.currentTaskIndex = 0\n\t\t}\n\t\ttm.mutex.Unlock()\n\t}\n}", "func (th stoppedTaskHandle) Wait(duration time.Duration) (bool, error) {\n\treturn true, nil\n}", "func (ft *FutureTask) Wait(timeout time.Duration) (res *[]byte, err error) {\n\tselect {\n\tcase res = <-ft.out:\n\tcase <-time.After(timeout):\n\t\terr = fmt.Errorf(\"task(%+v) timeout\", ft)\n\t}\n\treturn\n}", "func (t *TaskBox[T, U, C, CT, TF]) GetTaskCh() chan Task[T] {\n\treturn t.task\n}", "func (th runningTaskHandle) Wait(duration time.Duration) (bool, error) {\n\treturn true, nil\n}", "func (s *InvokeSync) Wait(ctx context.Context) error {\n\tif !s.wait.Wait(ctx) {\n\t\treturn task.StopReason(ctx)\n\t}\n\treturn s.err\n}", "func (t *Task) Wait() {\n\t<-t.done\n}", "func WaitForAsyncTaskDone(ctx context.Context, taskID string, client *goqedit.APIClient) (goqedit.GetTaskStatusResponse, error) {\n\tvar taskStatus goqedit.GetTaskStatusResponse\n\ttaskStatusRequest := goqedit.GetTaskStatusRequest{Id: taskID}\n\tfor i := 0; i < asyncTaskRetries; i++ {\n\t\ttaskStatus, _, err := client.NodeApi.NodeGetTaskStatusPost(ctx, taskStatusRequest)\n\t\tif err != nil {\n\t\t\treturn taskStatus, fmt.Errorf(\"couldn't get task status: %v\", ErrorResponseString(err))\n\t\t}\n\t\tif taskStatus.Result != \"pending\" && taskStatus.Result != \"in_progress\" {\n\t\t\treturn taskStatus, nil\n\t\t}\n\t\tfmt.Println(\"Waiting for task to be done\")\n\t\ttime.Sleep(asyncTaskWaitTime)\n\t}\n\treturn taskStatus, fmt.Errorf(\"waiting for task timed out after %d iterations\", asyncTaskRetries)\n}", "func NewWaitTask(name string, ids object.ObjMetadataSet, cond Condition, timeout time.Duration, mapper meta.RESTMapper) *WaitTask {\n\treturn &WaitTask{\n\t\tTaskName: name,\n\t\tIds: ids,\n\t\tCondition: cond,\n\t\tTimeout: timeout,\n\t\tMapper: mapper,\n\t}\n}", "func (d *Driver) DestroyTask(taskID string, force bool) 
error {\n\thandle, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\n\tif handle.isRunning() && !force {\n\t\treturn fmt.Errorf(\"cannot destroy running task\")\n\t}\n\n\tif handle.isRunning() {\n\t\td.logger.Debug(\"Have to destroyTask but container is still running\", \"containerID\", handle.containerID)\n\t\t// we can not do anything, so catching the error is useless\n\t\terr := d.podman.ContainerStop(d.ctx, handle.containerID, 60)\n\t\tif err != nil {\n\t\t\td.logger.Warn(\"failed to stop/kill container during destroy\", \"error\", err)\n\t\t}\n\t\t// wait a while for stats emitter to collect exit code etc.\n\t\tfor i := 0; i < 20; i++ {\n\t\t\tif !handle.isRunning() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(time.Millisecond * 250)\n\t\t}\n\t\tif handle.isRunning() {\n\t\t\td.logger.Warn(\"stats emitter did not exit while stop/kill container during destroy\", \"error\", err)\n\t\t}\n\t}\n\n\tif handle.removeContainerOnExit {\n\t\terr := d.podman.ContainerDelete(d.ctx, handle.containerID, true, true)\n\t\tif err != nil {\n\t\t\td.logger.Warn(\"Could not remove container\", \"container\", handle.containerID, \"error\", err)\n\t\t}\n\t}\n\n\td.tasks.Delete(taskID)\n\treturn nil\n}", "func waitForTask() {\n\t// create UNBUFFERED channel for messages of type string\n\tch := make(chan string)\n\n\tgo func() {\n\t\t// wait here until manager gives us a task\n\t\t// (because channel is unbuffered/blocking)\n\t\tp := <-ch // channel receive unary operator\n\t\tfmt.Println(\"worker: received signal:\", p)\n\t}()\n\n\t// wait here for a bit\n\ttime.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)\n\t// send message\n\tch <- \"paper\"\n\tfmt.Println(\"manager: sent signal\")\n\n\ttime.Sleep(time.Second)\n\tfmt.Println(\"------------ done ---------\")\n}", "func (d *Driver) DestroyTask(taskID string, force bool) error {\n\td.logger.Debug(\"DestroyTask called\")\n\th, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn 
drivers.ErrTaskNotFound\n\t}\n\n\t// implement driver specific logic to destroy a complete task.\n\t//\n\t// Destroying a task includes removing any resources used by task and any\n\t// local references in the plugin. If force is set to true the task should\n\t// be destroyed even if it's currently running.\n\tif err := h.DestroyVM(); err != nil {\n\t\treturn err\n\t}\n\n\td.tasks.Delete(taskID)\n\td.logger.Debug(\"DestroyTask returning\")\n\treturn nil\n}", "func monitorTask(ctx context.Context, task *model.Task, channel chan model.Event) {\n\t// derive new timeout context\n\tmonitorCtx, cancel := context.WithTimeout(ctx, 10 * time.Second)\n\tdefer cancel()\n\n\tselect {\n\tcase <- monitorCtx.Done():\n\t\t// check status of task\n\t\tstatus := task.GetStatus()\n\n\t\tif status != model.TaskStatusInitial && status != model.TaskStatusExecuting {\n\t\t\treturn\n\t\t}\n\n\t\t// task may still be active\n\t\tswitch monitorCtx.Err().Error() {\n\t\tcase \"context canceled\": // termination of processes\n\t\t\tutil.LogInfo(task.UUID, \"ENG\", \"termination\")\n\t\t\tchannel <- model.NewEvent(task.Domain, task.UUID, model.EventTypeTaskTermination, task.UUID, \"termination\")\n\t\tdefault: // timeout\n\t\t\tutil.LogInfo(task.UUID, \"ENG\", \"timeout\")\n\t\t\tchannel <- model.NewEvent(task.Domain, task.UUID, model.EventTypeTaskTimeout, task.UUID, \"timeout\")\n\t\t}\n\t}\n}", "func completeIfWaitTask(currentTask Task, taskContext *TaskContext) {\n\tif wt, ok := currentTask.(*WaitTask); ok {\n\t\twt.complete(taskContext)\n\t}\n}", "func (e *Eval) taskWait(ctx context.Context, f *Flow, task *sched.Task) error {\n\tif err := task.Wait(ctx, sched.TaskRunning); err != nil {\n\t\treturn err\n\t}\n\t// Grab the task's exec so that it can be logged properly.\n\tf.Exec = task.Exec\n\tif f.Op == Exec && f.Argmap != nil {\n\t\t// If this is an Exec and f.Argmap is defined, then\n\t\t// update the flow's resolved filesets.\n\t\tn := f.NExecArg()\n\t\tf.resolvedFs = 
make([]*reflow.Fileset, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\tearg, arg := f.ExecArg(i), task.Config.Args[i]\n\t\t\tif earg.Out {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf.resolvedFs[earg.Index] = arg.Fileset\n\t\t}\n\t}\n\te.LogFlow(ctx, f)\n\tif err := task.Wait(ctx, sched.TaskDone); err != nil {\n\t\treturn err\n\t}\n\tf.RunInfo = task.RunInfo\n\tif task.Err != nil {\n\t\te.Mutate(f, task.Err, Done)\n\t} else {\n\t\te.Mutate(f, task.Result.Err, task.Result.Fileset, Propagate, Done)\n\t}\n\treturn nil\n}", "func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) error {\n\td.logger.Info(\"Stopping task\", \"taskID\", taskID, \"signal\", signal)\n\thandle, ok := d.tasks.Get(taskID)\n\tif !ok {\n\t\treturn drivers.ErrTaskNotFound\n\t}\n\t// fixme send proper signal to container\n\terr := d.podman.ContainerStop(d.ctx, handle.containerID, int(timeout.Seconds()))\n\tif err != nil {\n\t\td.logger.Error(\"Could not stop/kill container\", \"containerID\", handle.containerID, \"err\", err)\n\t\treturn err\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }