Dataset columns (as reported by the viewer):

query: string, lengths 8 to 6.75k
document: string, lengths 9 to 1.89M
negatives: sequence of strings, fixed length 19
metadata: dict
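For reference, a minimal sketch of one record as a Go struct, assuming only the four columns above; the Row type, its JSON tags, and the toy record in main are illustrative, not part of the dataset itself. The rows below follow the column order query, document, negatives, metadata.

package main

import (
    "encoding/json"
    "fmt"
)

// Row mirrors one record of this dataset: a natural-language query,
// the matching Go snippet, a fixed list of 19 hard negatives, and
// the training-objective metadata. Field names are illustrative.
type Row struct {
    Query     string         `json:"query"`
    Document  string         `json:"document"`
    Negatives []string       `json:"negatives"`
    Metadata  map[string]any `json:"metadata"`
}

func main() {
    // A toy record (negatives truncated to one entry for brevity;
    // real rows carry 19).
    raw := `{"query":"NewWebhookSpec generates a new WebhookSpec from a job.WebhookSpec",
    "document":"func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec { ... }",
    "negatives":["func NewSpec() *Spec { ... }"],
    "metadata":{"objective":{"paired":[],"self":[],"triplet":[["query","document","negatives"]]}}}`
    var r Row
    if err := json.Unmarshal([]byte(raw), &r); err != nil {
        panic(err)
    }
    fmt.Printf("%s -> %d negatives\n", r.Query, len(r.Negatives))
}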
NewOffChainReportingSpec initializes a new OffChainReportingSpec from a job.OCROracleSpec
func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec {
    return &OffChainReportingSpec{
        ContractAddress: spec.ContractAddress,
        P2PBootstrapPeers: spec.P2PBootstrapPeers,
        P2PV2Bootstrappers: spec.P2PV2Bootstrappers,
        IsBootstrapPeer: spec.IsBootstrapPeer,
        EncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,
        TransmitterAddress: spec.TransmitterAddress,
        ObservationTimeout: spec.ObservationTimeout,
        ObservationTimeoutEnv: spec.ObservationTimeoutEnv,
        BlockchainTimeout: spec.BlockchainTimeout,
        BlockchainTimeoutEnv: spec.BlockchainTimeoutEnv,
        ContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,
        ContractConfigTrackerSubscribeIntervalEnv: spec.ContractConfigTrackerSubscribeIntervalEnv,
        ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,
        ContractConfigTrackerPollIntervalEnv: spec.ContractConfigTrackerPollIntervalEnv,
        ContractConfigConfirmations: spec.ContractConfigConfirmations,
        ContractConfigConfirmationsEnv: spec.ContractConfigConfirmationsEnv,
        CreatedAt: spec.CreatedAt,
        UpdatedAt: spec.UpdatedAt,
        EVMChainID: spec.EVMChainID,
        DatabaseTimeout: spec.DatabaseTimeout,
        DatabaseTimeoutEnv: spec.DatabaseTimeoutEnv,
        ObservationGracePeriod: spec.ObservationGracePeriod,
        ObservationGracePeriodEnv: spec.ObservationGracePeriodEnv,
        ContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout,
        ContractTransmitterTransmitTimeoutEnv: spec.ContractTransmitterTransmitTimeoutEnv,
        CollectTelemetry: spec.CaptureEATelemetry,
    }
}
[ "func NewOffChainReportingSpec(spec *job.OffchainReportingOracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PPeerID: spec.P2PPeerID,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tIsBootstrapPeer: spec.IsBootstrapPeer,\n\t\tEncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,\n\t\tTransmitterAddress: spec.TransmitterAddress,\n\t\tObservationTimeout: spec.ObservationTimeout,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewOffChainReporting2Spec(spec *job.OCR2OracleSpec) *OffChainReporting2Spec {\n\treturn &OffChainReporting2Spec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tP2PV2Bootstrappers: spec.P2PV2Bootstrappers,\n\t\tOCRKeyBundleID: spec.OCRKeyBundleID,\n\t\tTransmitterID: spec.TransmitterID,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tCollectTelemetry: spec.CaptureEATelemetry,\n\t}\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func newOtlpReceiver(cfg *Config, set receiver.CreateSettings) (*otlpReceiver, error) {\n\tr := &otlpReceiver{\n\t\tcfg: cfg,\n\t\tsettings: set,\n\t}\n\tif cfg.HTTP != nil {\n\t\tr.httpMux = http.NewServeMux()\n\t}\n\n\tvar err error\n\tr.obsrepGRPC, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"grpc\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.obsrepHTTP, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"http\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tEVMChainID: spec.EVMChainID,\n\t}\n}", "func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {\n\treturn &BootstrapSpec{\n\t\tContractID: spec.ContractID,\n\t\tRelay: spec.Relay,\n\t\tRelayConfig: spec.RelayConfig,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newPeerAuthenticationWithSpec() *securityv1beta1.PeerAuthentication {\n\tpeerAuthentication := newPeerAuthentication()\n\tpeerAuthentication.Spec.PortLevelMtls = map[uint32]*securityv1beta1apis.PeerAuthentication_MutualTLS{\n\t\ttTargetPort: {\n\t\t\tMode: 
securityv1beta1apis.PeerAuthentication_MutualTLS_PERMISSIVE,\n\t\t},\n\t}\n\tpeerAuthentication.Spec.Selector = &istiov1beta1apis.WorkloadSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\tapplicationLabelKey: tName,\n\t\t},\n\t}\n\treturn peerAuthentication\n}", "func newE2ESetup(msg *e2e.SetupReq) *colibri_mgmt.E2ESetup {\n\tallocTrail := make([]uint8, len(msg.AllocationTrail))\n\tfor i := range msg.AllocationTrail {\n\t\tallocTrail[i] = uint8(msg.AllocationTrail[i])\n\t}\n\treturn &colibri_mgmt.E2ESetup{\n\t\tBase: newE2EBase(&msg.Request),\n\t\tSegmentRsvs: newSegmentIDs(msg.SegmentRsvs),\n\t\tSegmentRsvASCount: msg.SegmentRsvASCount,\n\t\tRequestedBW: uint8(msg.RequestedBW),\n\t\tAllocationTrail: allocTrail,\n\t}\n}", "func NewSpec(serviceAccountName string, containers []corev1.Container, volumes []corev1.Volume) *Builder {\n\treturn &Builder{spec: newPodSpec(serviceAccountName, containers, volumes)}\n}", "func NewSpecConverter(t mockConstructorTestingTNewSpecConverter) *SpecConverter {\n\tmock := &SpecConverter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newReportBuilder() *reportBuilder {\n\treturn &reportBuilder{\n\t\tpackageBuilders: make(map[string]*packageBuilder),\n\t\tbuildErrors: make(map[int]gtr.Error),\n\t\tnextID: 1,\n\t\toutput: collector.New(),\n\t\ttimestampFunc: time.Now,\n\t}\n}", "func setupDiffReport(r *Report) {\n\tr.format.output = printDiffReport\n\tr.format.changestyles = make(map[string]ChangeStyle)\n\tr.format.changestyles[\"ADD\"] = ChangeStyle{color: \"green\", message: \"has been added:\"}\n\tr.format.changestyles[\"REMOVE\"] = ChangeStyle{color: \"red\", message: \"has been removed:\"}\n\tr.format.changestyles[\"MODIFY\"] = ChangeStyle{color: \"yellow\", message: \"has changed:\"}\n}", "func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {\n\treturn &WebhookSpec{\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewCoherenceInternalSpec(cluster *CoherenceCluster, role *CoherenceRole) *CoherenceInternalSpec {\n\tout := CoherenceInternalSpec{}\n\n\tout.FullnameOverride = role.Name\n\tout.Cluster = cluster.Name\n\tout.ServiceAccountName = cluster.Spec.ServiceAccountName\n\tout.AutomountServiceAccountToken = cluster.Spec.AutomountServiceAccountToken\n\tout.ImagePullSecrets = cluster.Spec.ImagePullSecrets\n\tout.WKA = cluster.GetWkaServiceName()\n\tout.OperatorRequestTimeout = cluster.Spec.OperatorRequestTimeout\n\n\tout.CoherenceRoleSpec = CoherenceRoleSpec{}\n\trole.Spec.DeepCopyInto(&out.CoherenceRoleSpec)\n\n\treturn &out\n}", "func NewMergeSpec(ctx context.Context, rsr env.RepoStateReader, ddb *doltdb.DoltDB, roots doltdb.Roots, name, email, msg, commitSpecStr string, squash, noff, force, noCommit, noEdit bool, date time.Time) (*MergeSpec, error) {\n\theadCS, err := doltdb.NewCommitSpec(\"HEAD\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theadRef, err := rsr.CWBHeadRef()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theadCM, err := ddb.Resolve(context.TODO(), headCS, headRef)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmergeCS, err := doltdb.NewCommitSpec(commitSpecStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmergeCM, err := ddb.Resolve(context.TODO(), mergeCS, headRef)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theadH, err := headCM.HashOf()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmergeH, err := mergeCM.HashOf()\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\tstompedTblNames, workingDiffs, err := 
MergeWouldStompChanges(ctx, roots, mergeCM)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w; %s\", ErrFailedToDetermineMergeability, err.Error())\n\t}\n\n\treturn &MergeSpec{\n\t\tHeadH: headH,\n\t\tMergeH: mergeH,\n\t\tHeadC: headCM,\n\t\tMergeCSpecStr: commitSpecStr,\n\t\tMergeC: mergeCM,\n\t\tStompedTblNames: stompedTblNames,\n\t\tWorkingDiffs: workingDiffs,\n\t\tSquash: squash,\n\t\tMsg: msg,\n\t\tNoff: noff,\n\t\tNoCommit: noCommit,\n\t\tNoEdit: noEdit,\n\t\tForce: force,\n\t\tEmail: email,\n\t\tName: name,\n\t\tDate: date,\n\t}, nil\n}", "func New(opts ...NewOption) internal.Reporter {\n\tr := &reporter{}\n\t// apply the new options\n\tfor _, opt := range opts {\n\t\topt(r)\n\t}\n\n\treturn r\n}", "func newWorker(\n\tm *manager,\n\tthirdComponent *v1alpha1.ThirdComponent,\n\tendpoint v1alpha1.ThirdComponentEndpointStatus) *worker {\n\n\tw := &worker{\n\t\tstopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking.\n\t\tprobeManager: m,\n\t\tthirdComponent: thirdComponent,\n\t\tendpoint: endpoint,\n\t}\n\n\tw.spec = thirdComponent.Spec.Probe\n\tw.resultsManager = m.readinessManager\n\tw.initialValue = results.Failure\n\n\tbasicMetricLabels := metrics.Labels{\n\t\t\"endpoint\": string(w.endpoint.Address),\n\t\t\"pod\": w.thirdComponent.Name,\n\t\t\"namespace\": w.thirdComponent.Namespace,\n\t}\n\n\tw.proberResultsSuccessfulMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsSuccessfulMetricLabels[\"result\"] = probeResultSuccessful\n\n\tw.proberResultsFailedMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsFailedMetricLabels[\"result\"] = probeResultFailed\n\n\tw.proberResultsUnknownMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)\n\tw.proberResultsUnknownMetricLabels[\"result\"] = probeResultUnknown\n\n\treturn w\n}", "func NewPrintConnector()(*PrintConnector) {\n m := &PrintConnector{\n Entity: *NewEntity(),\n }\n return m\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewOffChainReporting2Spec initializes a new OffChainReporting2Spec from a job.OCR2OracleSpec
func NewOffChainReporting2Spec(spec *job.OCR2OracleSpec) *OffChainReporting2Spec {
    return &OffChainReporting2Spec{
        ContractID: spec.ContractID,
        Relay: spec.Relay,
        RelayConfig: spec.RelayConfig,
        P2PV2Bootstrappers: spec.P2PV2Bootstrappers,
        OCRKeyBundleID: spec.OCRKeyBundleID,
        TransmitterID: spec.TransmitterID,
        BlockchainTimeout: spec.BlockchainTimeout,
        ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,
        ContractConfigConfirmations: spec.ContractConfigConfirmations,
        CreatedAt: spec.CreatedAt,
        UpdatedAt: spec.UpdatedAt,
        CollectTelemetry: spec.CaptureEATelemetry,
    }
}
[ "func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tP2PV2Bootstrappers: spec.P2PV2Bootstrappers,\n\t\tIsBootstrapPeer: spec.IsBootstrapPeer,\n\t\tEncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,\n\t\tTransmitterAddress: spec.TransmitterAddress,\n\t\tObservationTimeout: spec.ObservationTimeout,\n\t\tObservationTimeoutEnv: spec.ObservationTimeoutEnv,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tBlockchainTimeoutEnv: spec.BlockchainTimeoutEnv,\n\t\tContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,\n\t\tContractConfigTrackerSubscribeIntervalEnv: spec.ContractConfigTrackerSubscribeIntervalEnv,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigTrackerPollIntervalEnv: spec.ContractConfigTrackerPollIntervalEnv,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tContractConfigConfirmationsEnv: spec.ContractConfigConfirmationsEnv,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t\tEVMChainID: spec.EVMChainID,\n\t\tDatabaseTimeout: spec.DatabaseTimeout,\n\t\tDatabaseTimeoutEnv: spec.DatabaseTimeoutEnv,\n\t\tObservationGracePeriod: spec.ObservationGracePeriod,\n\t\tObservationGracePeriodEnv: spec.ObservationGracePeriodEnv,\n\t\tContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout,\n\t\tContractTransmitterTransmitTimeoutEnv: spec.ContractTransmitterTransmitTimeoutEnv,\n\t\tCollectTelemetry: spec.CaptureEATelemetry,\n\t}\n}", "func NewOffChainReportingSpec(spec *job.OffchainReportingOracleSpec) *OffChainReportingSpec {\n\treturn &OffChainReportingSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tP2PPeerID: spec.P2PPeerID,\n\t\tP2PBootstrapPeers: spec.P2PBootstrapPeers,\n\t\tIsBootstrapPeer: spec.IsBootstrapPeer,\n\t\tEncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,\n\t\tTransmitterAddress: spec.TransmitterAddress,\n\t\tObservationTimeout: spec.ObservationTimeout,\n\t\tBlockchainTimeout: spec.BlockchainTimeout,\n\t\tContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,\n\t\tContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: spec.ContractConfigConfirmations,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func newE2ESetup(msg *e2e.SetupReq) *colibri_mgmt.E2ESetup {\n\tallocTrail := make([]uint8, len(msg.AllocationTrail))\n\tfor i := range msg.AllocationTrail {\n\t\tallocTrail[i] = uint8(msg.AllocationTrail[i])\n\t}\n\treturn &colibri_mgmt.E2ESetup{\n\t\tBase: newE2EBase(&msg.Request),\n\t\tSegmentRsvs: newSegmentIDs(msg.SegmentRsvs),\n\t\tSegmentRsvASCount: msg.SegmentRsvASCount,\n\t\tRequestedBW: uint8(msg.RequestedBW),\n\t\tAllocationTrail: allocTrail,\n\t}\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func newOtlpReceiver(cfg *Config, set receiver.CreateSettings) (*otlpReceiver, error) {\n\tr := &otlpReceiver{\n\t\tcfg: cfg,\n\t\tsettings: set,\n\t}\n\tif cfg.HTTP != nil {\n\t\tr.httpMux = http.NewServeMux()\n\t}\n\n\tvar err error\n\tr.obsrepGRPC, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"grpc\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tr.obsrepHTTP, err = obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: set.ID,\n\t\tTransport: \"http\",\n\t\tReceiverCreateSettings: set,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func (s BootstrapSpec) AsOCR2Spec() OCR2OracleSpec {\n\treturn OCR2OracleSpec{\n\t\tID: s.ID,\n\t\tContractID: s.ContractID,\n\t\tRelay: s.Relay,\n\t\tRelayConfig: s.RelayConfig,\n\t\tMonitoringEndpoint: s.MonitoringEndpoint,\n\t\tBlockchainTimeout: s.BlockchainTimeout,\n\t\tContractConfigTrackerPollInterval: s.ContractConfigTrackerPollInterval,\n\t\tContractConfigConfirmations: s.ContractConfigConfirmations,\n\t\tCreatedAt: s.CreatedAt,\n\t\tUpdatedAt: s.UpdatedAt,\n\t\tP2PV2Bootstrappers: pq.StringArray{},\n\t}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewSpecConverter(t mockConstructorTestingTNewSpecConverter) *SpecConverter {\n\tmock := &SpecConverter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func newReceiver(\n\tparams receiver.CreateSettings,\n\tconfig *Config,\n\tnextConsumer consumer.Traces,\n) (receiver.Traces, error) {\n\t// build the response message\n\tdefaultResponse := &splunksapm.PostSpansResponse{}\n\tdefaultResponseBytes, err := defaultResponse.Marshal()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to marshal default response body for %v receiver: %w\", params.ID, err)\n\t}\n\ttransport := \"http\"\n\tif config.TLSSetting != nil {\n\t\ttransport = \"https\"\n\t}\n\tobsrecv, err := obsreport.NewReceiver(obsreport.ReceiverSettings{\n\t\tReceiverID: params.ID,\n\t\tTransport: transport,\n\t\tReceiverCreateSettings: params,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &sapmReceiver{\n\t\tsettings: params.TelemetrySettings,\n\t\tconfig: config,\n\t\tnextConsumer: nextConsumer,\n\t\tdefaultResponse: defaultResponseBytes,\n\t\tobsrecv: obsrecv,\n\t}, nil\n}", "func New(opts ...NewOption) internal.Reporter {\n\tr := &reporter{}\n\t// apply the new options\n\tfor _, opt := range opts {\n\t\topt(r)\n\t}\n\n\treturn r\n}", "func newWorkBookFromOle2(rs io.ReadSeeker) *WorkBook {\n\twb := new(WorkBook)\n\twb.Formats = make(map[uint16]*Format)\n\t// wb.bts = bts\n\twb.rs = rs\n\twb.sheets = make([]*WorkSheet, 0)\n\twb.Parse(rs)\n\treturn wb\n}", "func NewCoherenceInternalSpec(cluster *CoherenceCluster, role *CoherenceRole) *CoherenceInternalSpec {\n\tout := CoherenceInternalSpec{}\n\n\tout.FullnameOverride = role.Name\n\tout.Cluster = cluster.Name\n\tout.ServiceAccountName = cluster.Spec.ServiceAccountName\n\tout.AutomountServiceAccountToken = cluster.Spec.AutomountServiceAccountToken\n\tout.ImagePullSecrets = cluster.Spec.ImagePullSecrets\n\tout.WKA = cluster.GetWkaServiceName()\n\tout.OperatorRequestTimeout = cluster.Spec.OperatorRequestTimeout\n\n\tout.CoherenceRoleSpec = CoherenceRoleSpec{}\n\trole.Spec.DeepCopyInto(&out.CoherenceRoleSpec)\n\n\treturn &out\n}", "func New(transport runtime.ClientTransport, formats strfmt.Registry) *CrowdStrikeAPISpecification {\n\t// ensure nullable parameters have default\n\tif formats == nil 
{\n\t\tformats = strfmt.Default\n\t}\n\n\tcli := new(CrowdStrikeAPISpecification)\n\tcli.Transport = transport\n\tcli.Alerts = alerts.New(transport, formats)\n\tcli.CloudConnectAws = cloud_connect_aws.New(transport, formats)\n\tcli.ConfigurationAssessment = configuration_assessment.New(transport, formats)\n\tcli.ConfigurationAssessmentEvaluationLogic = configuration_assessment_evaluation_logic.New(transport, formats)\n\tcli.CspmRegistration = cspm_registration.New(transport, formats)\n\tcli.CustomIoa = custom_ioa.New(transport, formats)\n\tcli.D4cRegistration = d4c_registration.New(transport, formats)\n\tcli.Detects = detects.New(transport, formats)\n\tcli.DeviceControlPolicies = device_control_policies.New(transport, formats)\n\tcli.Discover = discover.New(transport, formats)\n\tcli.DiscoverIot = discover_iot.New(transport, formats)\n\tcli.EventSchema = event_schema.New(transport, formats)\n\tcli.EventStreams = event_streams.New(transport, formats)\n\tcli.FalconCompleteDashboard = falcon_complete_dashboard.New(transport, formats)\n\tcli.FalconContainer = falcon_container.New(transport, formats)\n\tcli.FalconContainerCli = falcon_container_cli.New(transport, formats)\n\tcli.FalconContainerImage = falcon_container_image.New(transport, formats)\n\tcli.FalconxSandbox = falconx_sandbox.New(transport, formats)\n\tcli.FieldSchema = field_schema.New(transport, formats)\n\tcli.Filevantage = filevantage.New(transport, formats)\n\tcli.FirewallManagement = firewall_management.New(transport, formats)\n\tcli.FirewallPolicies = firewall_policies.New(transport, formats)\n\tcli.HostGroup = host_group.New(transport, formats)\n\tcli.Hosts = hosts.New(transport, formats)\n\tcli.IdentityEntities = identity_entities.New(transport, formats)\n\tcli.IdentityProtection = identity_protection.New(transport, formats)\n\tcli.Incidents = incidents.New(transport, formats)\n\tcli.InstallationTokens = installation_tokens.New(transport, formats)\n\tcli.InstallationTokensSettings = installation_tokens_settings.New(transport, formats)\n\tcli.Intel = intel.New(transport, formats)\n\tcli.Inventories = inventories.New(transport, formats)\n\tcli.IoaExclusions = ioa_exclusions.New(transport, formats)\n\tcli.Ioc = ioc.New(transport, formats)\n\tcli.Iocs = iocs.New(transport, formats)\n\tcli.KubernetesProtection = kubernetes_protection.New(transport, formats)\n\tcli.Malquery = malquery.New(transport, formats)\n\tcli.MessageCenter = message_center.New(transport, formats)\n\tcli.MlExclusions = ml_exclusions.New(transport, formats)\n\tcli.MobileEnrollment = mobile_enrollment.New(transport, formats)\n\tcli.Mssp = mssp.New(transport, formats)\n\tcli.Oauth2 = oauth2.New(transport, formats)\n\tcli.Ods = ods.New(transport, formats)\n\tcli.OverwatchDashboard = overwatch_dashboard.New(transport, formats)\n\tcli.PreventionPolicies = prevention_policies.New(transport, formats)\n\tcli.Provision = provision.New(transport, formats)\n\tcli.Quarantine = quarantine.New(transport, formats)\n\tcli.QuickScan = quick_scan.New(transport, formats)\n\tcli.RealTimeResponse = real_time_response.New(transport, formats)\n\tcli.RealTimeResponseAdmin = real_time_response_admin.New(transport, formats)\n\tcli.Recon = recon.New(transport, formats)\n\tcli.ReportExecutions = report_executions.New(transport, formats)\n\tcli.ResponsePolicies = response_policies.New(transport, formats)\n\tcli.SampleUploads = sample_uploads.New(transport, formats)\n\tcli.ScheduledReports = scheduled_reports.New(transport, formats)\n\tcli.SensorDownload = sensor_download.New(transport, 
formats)\n\tcli.SensorUpdatePolicies = sensor_update_policies.New(transport, formats)\n\tcli.SensorVisibilityExclusions = sensor_visibility_exclusions.New(transport, formats)\n\tcli.TailoredIntelligence = tailored_intelligence.New(transport, formats)\n\tcli.UserManagement = user_management.New(transport, formats)\n\tcli.Vulnerabilities = vulnerabilities.New(transport, formats)\n\tcli.VulnerabilitiesEvaluationLogic = vulnerabilities_evaluation_logic.New(transport, formats)\n\tcli.ZeroTrustAssessment = zero_trust_assessment.New(transport, formats)\n\treturn cli\n}", "func newBatchSpecExecutionResetter(s *store.Store, observationContext *observation.Context, metrics batchChangesMetrics) *dbworker.Resetter {\n\tworkerStore := NewExecutorStore(s, observationContext)\n\n\toptions := dbworker.ResetterOptions{\n\t\tName: \"batch_spec_executor_resetter\",\n\t\tInterval: 1 * time.Minute,\n\t\tMetrics: metrics.executionResetterMetrics,\n\t}\n\n\tresetter := dbworker.NewResetter(workerStore, options)\n\treturn resetter\n}", "func newReportBuilder() *reportBuilder {\n\treturn &reportBuilder{\n\t\tpackageBuilders: make(map[string]*packageBuilder),\n\t\tbuildErrors: make(map[int]gtr.Error),\n\t\tnextID: 1,\n\t\toutput: collector.New(),\n\t\ttimestampFunc: time.Now,\n\t}\n}", "func createVendorSpec(b *troubleshootv1beta2.SupportBundle) (*troubleshootv1beta2.SupportBundle, error) {\n\tsupportBundle, err := staticspecs.GetVendorSpec()\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to load vendor support bundle spec: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif b.Spec.Collectors != nil {\n\t\tsupportBundle.Spec.Collectors = b.DeepCopy().Spec.Collectors\n\t}\n\tif b.Spec.Analyzers != nil {\n\t\tsupportBundle.Spec.Analyzers = b.DeepCopy().Spec.Analyzers\n\t}\n\treturn supportBundle, nil\n}", "func New() *Spec {\n\treturn &Spec{}\n}", "func setupDiffReport(r *Report) {\n\tr.format.output = printDiffReport\n\tr.format.changestyles = make(map[string]ChangeStyle)\n\tr.format.changestyles[\"ADD\"] = ChangeStyle{color: \"green\", message: \"has been added:\"}\n\tr.format.changestyles[\"REMOVE\"] = ChangeStyle{color: \"red\", message: \"has been removed:\"}\n\tr.format.changestyles[\"MODIFY\"] = ChangeStyle{color: \"yellow\", message: \"has changed:\"}\n}", "func AlertmanagerSpec() *AlertmanagerSpecApplyConfiguration {\n\treturn &AlertmanagerSpecApplyConfiguration{}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewPipelineSpec generates a new PipelineSpec from a pipeline.Spec
func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {
    return PipelineSpec{
        ID: spec.ID,
        JobID: spec.JobID,
        DotDAGSource: spec.DotDagSource,
    }
}
[ "func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec {\n\treturn PipelineSpec{\n\t\tID: spec.ID,\n\t\tDotDAGSource: spec.DotDagSource,\n\t}\n}", "func (in *PipelineSpec) DeepCopy() *PipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func New() *Spec {\n\treturn &Spec{}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewFromSpec(spec *rspec.Spec) Generator {\n\treturn Generator{\n\t\tspec: spec,\n\t}\n}", "func NewSpec(serviceAccountName string, containers []corev1.Container, volumes []corev1.Volume) *Builder {\n\treturn &Builder{spec: newPodSpec(serviceAccountName, containers, volumes)}\n}", "func NewSpec(source reference.Named, digest, imageID digest.Digest, tlsVerify *bool, listDigest string, localName string) Spec {\n\tif localName == \"\" {\n\t\tlocalName = source.String()\n\t}\n\treturn Spec{\n\t\tSource: source.Name(),\n\t\tDigest: digest.String(),\n\t\tTLSVerify: tlsVerify,\n\t\tImageID: imageID.String(),\n\t\tLocalName: localName,\n\t\tListDigest: listDigest,\n\t}\n}", "func (in *TracePipelineSpec) DeepCopy() *TracePipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TracePipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewStructSpec(name string) *StructSpec {\n\treturn &StructSpec{\n\t\tName: name,\n\t}\n}", "func NewSpecFactory() *SpecFactory {\n\treturn &SpecFactory{}\n}", "func newFieldSpec() FieldSpec {\n\treturn FieldSpec(make(map[string]pair))\n}", "func NewPipeline(ctx *pulumi.Context,\n\tname string, args *PipelineArgs, opts ...pulumi.ResourceOption) (*Pipeline, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.BootstrapConfiguration == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'BootstrapConfiguration'\")\n\t}\n\tif args.PipelineType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'PipelineType'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops/v20200713preview:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:devops:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:devops/v20190701preview:Pipeline\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:devops/v20190701preview:Pipeline\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource Pipeline\n\terr := ctx.RegisterResource(\"azure-native:devops/v20200713preview:Pipeline\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewPipeline(\n\tbeatInfo Info,\n\tconfig Config,\n\tprocessors Supporter,\n\tmakeOutput func(Observer) 
(string, Group, error),\n) (*Pipeline, error) {\n\tsettings := Settings{\n\t\tWaitClose: 0,\n\t\tWaitCloseMode: NoWaitOnClose,\n\t\tProcessors: processors,\n\t}\n\n\toutput, err := console.NewConsoleOutput()\n\tqueue := memoryNewMemoryQueue()\n\n\tpipeline := &Pipeline{\n\t\tbeatInfo: beat,\n\t\twaitCloseMode: settings.WaitCloseMode,\n\t\twaitCloseTimeout: settings.WaitClose,\n\t\tprocessors: settings.Processors,\n\t}\n\n\tpipeline.queue = queue\n\tpipeline.output = newOutputController(beat, pipeline.queue)\n\tpipeline.output.Set(output)\n\n\treturn pipeline, nil\n}", "func NewSpecConverter(t mockConstructorTestingTNewSpecConverter) *SpecConverter {\n\tmock := &SpecConverter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func (in *LogPipelineSpec) DeepCopy() *LogPipelineSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(LogPipelineSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func InstallNewPipeline(ctx context.Context, driver ProtocolDriver, exporterOpts ...ExporterOption) (*Exporter,\n\t*sdktrace.TracerProvider, *basic.Controller, error) {\n\n\texp, tp, cntr, err := NewExportPipeline(ctx, driver, exporterOpts...)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\totel.SetTracerProvider(tp)\n\terr = cntr.Start(ctx)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\treturn exp, tp, cntr, err\n}", "func CreatePipeline(codecName string, pipelineStr string, clockRate float32) *Pipeline {\n\t// Generate C String from Input\n\tpipelineStrUnsafe := C.CString(pipelineStr)\n\tdefer C.free(unsafe.Pointer(pipelineStrUnsafe))\n\n\t// Lock Pipelines\n\tpipelinesLock.Lock()\n\tdefer pipelinesLock.Unlock()\n\n\t// Create new Pipeline\n\tpipeline := &Pipeline{\n\t\tPipeline: C.gstreamer_create_pipeline(pipelineStrUnsafe),\n\t\tid: utils.RandSeq(5),\n\t\tcodecName: codecName,\n\t\tclockRate: clockRate,\n\t}\n\tpipeline.outputTracks = []*webrtc.Track{}\n\t// Add new Pipeline\n\tpipelines[pipeline.id] = pipeline\n\treturn pipeline\n}", "func NewPipeline(name string, r *r.Server) Pipeline {\n\tp := Pipeline{name, r, nil}\n\treturn p\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewKeeperSpec generates a new KeeperSpec from a job.KeeperSpec
func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {
    return &KeeperSpec{
        ContractAddress: spec.ContractAddress,
        FromAddress: spec.FromAddress,
        CreatedAt: spec.CreatedAt,
        UpdatedAt: spec.UpdatedAt,
        EVMChainID: spec.EVMChainID,
    }
}
[ "func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec {\n\treturn &KeeperSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tFromAddress: spec.FromAddress,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}", "func NewKeeper(am sdk.AccountMapper) Keeper {\n\treturn Keeper{am: am}\n}", "func NewKeeper(cdc *codec.Codec, key sdk.StoreKey, coinKeeper types.BankKeeper, supplyKeeper types.SupplyKeeper) Keeper {\n\tkeeper := Keeper{\n\t\tCoinKeeper: coinKeeper,\n\t\tSupplyKeeper: supplyKeeper,\n\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t}\n\treturn keeper\n}", "func NewKeeper(\n\tcdc codec.BinaryMarshaler,\n\tstoreKey sdk.StoreKey,\n\tparamSpace paramtypes.Subspace,\n\tak authkeeper.AccountKeeper,\n\tbk types.BankKeeper,\n) BaseKeeper {\n\n\t// set KeyTable if it has not already been set\n\tif !paramSpace.HasKeyTable() {\n\t\tparamSpace = paramSpace.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn BaseKeeper{\n\t\tsvcTags: metrics.Tags{\n\t\t\t\"svc\": \"oracle_k\",\n\t\t},\n\t\tparamSpace: paramSpace,\n\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\taccountKeeper: ak,\n\t\tbankKeeper: bk,\n\t\tlogger: log.WithField(\"module\", types.ModuleName),\n\t}\n}", "func NewKeeper(cdc codec.Marshaler, key sdk.StoreKey, wasmKeeper *wasm.Keeper, aiRequestSubspace params.Subspace, stakingKeeper staking.Keeper, bankKeeper bank.Keeper, providerKeeper *provider.Keeper) *Keeper {\n\tif !aiRequestSubspace.HasKeyTable() {\n\t\t// register parameters of the airequest module into the param space\n\t\taiRequestSubspace = aiRequestSubspace.WithKeyTable(types.ParamKeyTable())\n\t}\n\treturn &Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\twasmKeeper: wasmKeeper,\n\t\tparamSpace: aiRequestSubspace,\n\t\tstakingKeeper: stakingKeeper,\n\t\tbankKeeper: bankKeeper,\n\t\tproviderKeeper: providerKeeper,\n\t}\n}", "func (in *ZookeeperSpec) DeepCopy() *ZookeeperSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ZookeeperSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewKeeper(storeKey sdk.StoreKey, cdc codec.BinaryCodec) Keeper {\n\treturn Keeper{\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\tupgradeHandlers: map[string]types.UpgradeHandler{},\n\t}\n}", "func NewKeeper(\n\tcdc codec.Marshaler, key sdk.StoreKey,\n\tchannelKeeper types.ChannelKeeper, portKeeper types.PortKeeper,\n\tauthKeeper types.AccountKeeper, bankKeeper types.BankKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,\n) Keeper {\n\n\t// ensure ibc transfer module account is set\n\tif addr := authKeeper.GetModuleAddress(types.ModuleName); addr == nil {\n\t\tpanic(\"the IBC transfer module account has not been set\")\n\t}\n\n\treturn Keeper{\n\t\tstoreKey: key,\n\t\tcdc: cdc,\n\t\tchannelKeeper: channelKeeper,\n\t\tportKeeper: portKeeper,\n\t\tauthKeeper: authKeeper,\n\t\tbankKeeper: bankKeeper,\n\t\tscopedKeeper: scopedKeeper,\n\t}\n}", "func (k *Keeper) createLegacyKeeperJob(client cmd.HTTPClient, registryAddr, nodeAddr string) error {\n\trequest, err := json.Marshal(web.CreateJobRequest{\n\t\tTOML: testspecs.GenerateKeeperSpec(testspecs.KeeperSpecParams{\n\t\t\tName: fmt.Sprintf(\"keeper job - registry %s\", registryAddr),\n\t\t\tContractAddress: registryAddr,\n\t\t\tFromAddress: nodeAddr,\n\t\t\tEvmChainID: int(k.cfg.ChainID),\n\t\t}).Toml(),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal request: %s\", err)\n\t}\n\n\tresp, err := client.Post(\"/v2/jobs\", bytes.NewReader(request))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create keeper job: %s\", err)\n\t}\n\tdefer 
resp.Body.Close()\n\n\tif resp.StatusCode >= 400 {\n\t\tbody, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read error response body: %s\", err)\n\t\t}\n\n\t\treturn fmt.Errorf(\"unable to create keeper job: '%v' [%d]\", string(body), resp.StatusCode)\n\t}\n\n\treturn nil\n}", "func NewTestKeeper(t testing.TB, isCheckTx bool) (Keeper, sdk.Context) {\n\tcdc := NewTestCodec()\n\t// generate store\n\tmdb := db.NewMemDB()\n\t// generate multistore\n\tms := store.NewCommitMultiStore(mdb)\n\t// generate store keys\n\tconfigurationStoreKey := sdk.NewKVStoreKey(StoreKey) // configuration module store key\n\t// generate sub store for each module referenced by the keeper\n\tms.MountStoreWithDB(configurationStoreKey, sdk.StoreTypeIAVL, mdb) // mount configuration module\n\t// test no errors\n\trequire.Nil(t, ms.LoadLatestVersion())\n\t// create context\n\tctx := sdk.NewContext(ms, tmproto.Header{Time: time.Now()}, isCheckTx, log.NewNopLogger())\n\t// create domain.Keeper\n\treturn NewKeeper(cdc, configurationStoreKey, nil), ctx\n}", "func NewKeeper(client *api.Client, keyID string, opts *KeeperOptions) *secrets.Keeper {\n\treturn secrets.NewKeeper(&keeper{\n\t\tkeyID: keyID,\n\t\tclient: client,\n\t})\n}", "func NewSpec(serviceAccountName string, containers []corev1.Container, volumes []corev1.Volume) *Builder {\n\treturn &Builder{spec: newPodSpec(serviceAccountName, containers, volumes)}\n}", "func (k *Keeper) createKeeperJob(client cmd.HTTPClient, registryAddr, nodeAddr string) error {\n\tvar err error\n\tif k.cfg.OCR2Keepers {\n\t\terr = k.createOCR2KeeperJob(client, registryAddr, nodeAddr)\n\t} else {\n\t\terr = k.createLegacyKeeperJob(client, registryAddr, nodeAddr)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(\"Keeper job has been successfully created in the Chainlink node with address: \", nodeAddr)\n\n\treturn nil\n}", "func (k *Keeper) createKeeperJob(client cmd.HTTPClient, registryAddr, nodeAddr string) error {\n\trequest, err := json.Marshal(web.CreateJobRequest{\n\t\tTOML: testspecs.GenerateKeeperSpec(testspecs.KeeperSpecParams{\n\t\t\tName: fmt.Sprintf(\"keeper job - registry %s\", registryAddr),\n\t\t\tContractAddress: registryAddr,\n\t\t\tFromAddress: nodeAddr,\n\t\t\tEvmChainID: int(k.cfg.ChainID),\n\t\t\tMinIncomingConfirmations: 1,\n\t\t\tObservationSource: keeper.ExpectedObservationSource,\n\t\t}).Toml(),\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal request: %s\", err)\n\t}\n\n\tresp, err := client.Post(\"/v2/jobs\", bytes.NewReader(request))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create keeper job: %s\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 400 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read error response body: %s\", err)\n\t\t}\n\n\t\treturn fmt.Errorf(\"unable to create keeper job: '%v' [%d]\", string(body), resp.StatusCode)\n\t}\n\tlog.Println(\"Keeper job has been successfully created in the Chainlink node with address: \", nodeAddr)\n\treturn nil\n}", "func (in *BookkeeperSpec) DeepCopy() *BookkeeperSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(BookkeeperSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewKeeperStatefulset(cc *api.CitusCluster) *apps.StatefulSet {\n\tinstanceName := cc.GetLabels()[label.InstanceLabelKey]\n\tkeeperLabel := label.New().Instance(instanceName).Keeper()\n\tstolonDataVol := \"data\"\n\tvolMounts := []corev1.VolumeMount{\n\t\t{Name: stolonDataVol, 
MountPath: \"/stolon-data\"},\n\t\t{Name: \"stolon\", ReadOnly: false, MountPath: \"/etc/secrets/stolon\"},\n\t}\n\tvols := []corev1.Volume{\n\t\t{Name: \"stolon\", VolumeSource: corev1.VolumeSource{\n\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\tSecretName: \"stolon\",\n\t\t\t}},\n\t\t},\n\t}\n\tannos := map[string]string{\n\t\t\"pod.alpha.kubernetes.io/initialized\": \"true\",\n\t\t\"prometheus.io/scrape\": \"true\",\n\t\t\"prometheus.io/port/keeper\": \"8080\",\n\t\t\"prometheus.io/port/sentinel\": \"8081\",\n\t}\n\tq, _ := resource.ParseQuantity(defaultKeeperStorageSize)\n\tif cc.Spec.Keeper.Requests != nil {\n\t\tsize := cc.Spec.Keeper.Requests.Storage\n\t\tvar err error\n\t\tq, err = resource.ParseQuantity(size)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"cant' get storage size: %s for CitusCluster: %s/%s, %v\", size, cc.Namespace, cc.Name, err))\n\t\t}\n\t}\n\tcmd := []string{\n\t\t\"/bin/bash\",\n\t\t\"-ec\",\n\t\t`IFS='-' read -ra ADDR <<< \"$(hostname)\"\n\t\t export STKEEPER_UID=keeper\"${ADDR[-1]}\"\n\t\t export POD_IP=$(hostname -i)\n\t\t export STKEEPER_PG_LISTEN_ADDRESS=$POD_IP\n\t\t export STOLON_DATA=/stolon-data\n\t\t chown stolon:stolon $STOLON_DATA\n\t\t exec gosu stolon stolon-sentinel &;\n\t\t exec gosu stolon stolon-keeper --data-dir $STOLON_DATA&`,\n\t}\n\tss := &apps.StatefulSet{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cc.Name,\n\t\t\tNamespace: cc.Namespace,\n\t\t\tLabels: keeperLabel.Labels(),\n\t\t\tOwnerReferences: []metav1.OwnerReference{GetOwnerRef(cc)},\n\t\t},\n\t\tSpec: apps.StatefulSetSpec{\n\t\t\tServiceName: cc.Name,\n\t\t\tReplicas: func() *int32 { r := cc.Spec.Keeper.Size; return &r }(),\n\t\t\tSelector: keeperLabel.LabelSelector(),\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: keeperLabel.Labels(),\n\t\t\t\t\tAnnotations: annos,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\n\t\t\t\t\t// SchedulerName: cc.Spec.SchedulerName,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"stolon-keeper\",\n\t\t\t\t\t\t\tImage: cc.Spec.Keeper.Image,\n\t\t\t\t\t\t\tCommand: cmd,\n\t\t\t\t\t\t\tImagePullPolicy: cc.Spec.Keeper.ImagePullPolicy,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"keeperPort\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(KeeperPort),\n\t\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"keeperMetricsPort\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(8080),\n\t\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"sentinelMetricsPort\",\n\t\t\t\t\t\t\t\t\tContainerPort: int32(8081),\n\t\t\t\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumeMounts: volMounts,\n\t\t\t\t\t\t\tResources: util.ResourceRequirement(cc.Spec.Keeper.ContainerSpec),\n\t\t\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"NAMESPACE\",\n\t\t\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\t\t\tFieldRef: &corev1.ObjectFieldSelector{\n\t\t\t\t\t\t\t\t\t\t\tFieldPath: \"metadata.namespace\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"POD_NAME\",\n\t\t\t\t\t\t\t\t\tValueFrom: &corev1.EnvVarSource{\n\t\t\t\t\t\t\t\t\t\tFieldRef: &corev1.ObjectFieldSelector{\n\t\t\t\t\t\t\t\t\t\t\tFieldPath: 
\"metadata.name\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_CLUSTER_NAME\",\n\t\t\t\t\t\t\t\t\tValue: cc.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_STORE_BACKEND\",\n\t\t\t\t\t\t\t\t\tValue: \"kubernetes\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_KUBE_RESOURCE_KIND\",\n\t\t\t\t\t\t\t\t\tValue: \"configmap\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_PG_REPL_USERNAME\",\n\t\t\t\t\t\t\t\t\tValue: \"repluser\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_PG_REPL_PASSWORD\",\n\t\t\t\t\t\t\t\t\tValue: \"replpassword\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_PG_SU_USERNAME\",\n\t\t\t\t\t\t\t\t\tValue: \"stolon\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_PG_SU_PASSWORDFILE\",\n\t\t\t\t\t\t\t\t\tValue: \"/etc/secrets/stolon/password\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STKEEPER_METRICS_LISTEN_ADDRESS\",\n\t\t\t\t\t\t\t\t\tValue: \"0.0.0.0:8080\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STSENTINEL_CLUSTER_NAME\",\n\t\t\t\t\t\t\t\t\tValue: cc.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STSENTINEL_STORE_BACKEND\",\n\t\t\t\t\t\t\t\t\tValue: \"kubernetes\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STSENTINEL_KUBE_RESOURCE_KIND\",\n\t\t\t\t\t\t\t\t\tValue: \"configmap\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"STSENTINEL_METRICS_LISTEN_ADDRESS\",\n\t\t\t\t\t\t\t\t\tValue: \"0.0.0.0:8081\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t\t// Tolerations: cc.Spec.Store.Tolerations,\n\t\t\t\t\tVolumes: vols,\n\t\t\t\t},\n\t\t\t},\n\t\t\tVolumeClaimTemplates: []corev1.PersistentVolumeClaim{\n\t\t\t\tVolumeClaimTemplates(q, stolonDataVol, func() *string { s := cc.Spec.Keeper.StorageClassName; return &s }()),\n\t\t\t},\n\t\t\tPodManagementPolicy: apps.ParallelPodManagement,\n\t\t\tUpdateStrategy: apps.StatefulSetUpdateStrategy{\n\t\t\t\tType: apps.RollingUpdateStatefulSetStrategyType,\n\t\t\t\tRollingUpdate: &apps.RollingUpdateStatefulSetStrategy{\n\t\t\t\t\tPartition: func() *int32 { r := cc.Spec.Keeper.Size; return &r }(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn ss\n}", "func CreateTestKeepers(t *testing.T, consensusNeeded float64, validatorAmounts []int64, extraMaccPerm string) (sdk.Context, keeper.Keeper, bankkeeper.Keeper, authkeeper.AccountKeeper, oraclekeeper.Keeper, simappparams.EncodingConfig, []sdk.ValAddress) {\n\n\tPKs := CreateTestPubKeys(500)\n\tkeyStaking := sdk.NewKVStoreKey(stakingtypes.StoreKey)\n\t// TODO: staking.TStoreKey removed in favor of?\n\ttkeyStaking := sdk.NewTransientStoreKey(\"transient_staking\")\n\tkeyAcc := sdk.NewKVStoreKey(authtypes.StoreKey)\n\tkeyParams := sdk.NewKVStoreKey(paramstypes.StoreKey)\n\ttkeyParams := sdk.NewTransientStoreKey(paramstypes.TStoreKey)\n\tkeyBank := sdk.NewKVStoreKey(banktypes.StoreKey)\n\tkeyOracle := sdk.NewKVStoreKey(oracleTypes.StoreKey)\n\tkeyEthBridge := sdk.NewKVStoreKey(types.StoreKey)\n\n\tdb := dbm.NewMemDB()\n\tms := store.NewCommitMultiStore(db)\n\tms.MountStoreWithDB(tkeyStaking, sdk.StoreTypeTransient, nil)\n\tms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, 
db)\n\tms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)\n\tms.MountStoreWithDB(keyBank, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyOracle, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyEthBridge, sdk.StoreTypeIAVL, db)\n\terr := ms.LoadLatestVersion()\n\trequire.NoError(t, err)\n\n\tctx := sdk.NewContext(ms, tmproto.Header{ChainID: \"foochainid\"}, false, nil)\n\tctx = ctx.WithConsensusParams(\n\t\t&abci.ConsensusParams{\n\t\t\tValidator: &tmproto.ValidatorParams{\n\t\t\t\tPubKeyTypes: []string{tmtypes.ABCIPubKeyTypeEd25519},\n\t\t\t},\n\t\t},\n\t)\n\tctx = ctx.WithLogger(log.NewNopLogger())\n\tencCfg := MakeTestEncodingConfig()\n\n\tbridgeAccount := authtypes.NewEmptyModuleAccount(types.ModuleName, authtypes.Burner, authtypes.Minter)\n\n\tfeeCollectorAcc := authtypes.NewEmptyModuleAccount(authtypes.FeeCollectorName)\n\tnotBondedPool := authtypes.NewEmptyModuleAccount(stakingtypes.NotBondedPoolName, authtypes.Burner, authtypes.Staking)\n\tbondPool := authtypes.NewEmptyModuleAccount(stakingtypes.BondedPoolName, authtypes.Burner, authtypes.Staking)\n\n\tblacklistedAddrs := make(map[string]bool)\n\tblacklistedAddrs[feeCollectorAcc.GetAddress().String()] = true\n\tblacklistedAddrs[notBondedPool.GetAddress().String()] = true\n\tblacklistedAddrs[bondPool.GetAddress().String()] = true\n\n\tmaccPerms := map[string][]string{\n\t\tauthtypes.FeeCollectorName: nil,\n\t\tstakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking},\n\t\tstakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking},\n\t\ttypes.ModuleName: {authtypes.Burner, authtypes.Minter},\n\t}\n\n\tif extraMaccPerm != \"\" {\n\t\tmaccPerms[extraMaccPerm] = []string{authtypes.Burner, authtypes.Minter}\n\t}\n\n\tparamsKeeper := paramskeeper.NewKeeper(encCfg.Marshaler, encCfg.Amino, keyParams, tkeyParams)\n\n\t//accountKeeper gets maccParams in 0.40, module accounts moved from supplykeeper to authkeeper\n\taccountKeeper := authkeeper.NewAccountKeeper(\n\t\tencCfg.Marshaler, // amino codec\n\t\tkeyAcc, // target store\n\t\tparamsKeeper.Subspace(authtypes.ModuleName),\n\t\tauthtypes.ProtoBaseAccount, // prototype,\n\t\tmaccPerms,\n\t)\n\n\tbankKeeper := bankkeeper.NewBaseKeeper(\n\t\tencCfg.Marshaler,\n\t\tkeyBank,\n\t\taccountKeeper,\n\t\tparamsKeeper.Subspace(banktypes.ModuleName),\n\t\tblacklistedAddrs,\n\t)\n\n\tinitTokens := sdk.TokensFromConsensusPower(10000)\n\ttotalSupply := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens.MulRaw(int64(100))))\n\n\tbankKeeper.SetSupply(ctx, banktypes.NewSupply(totalSupply))\n\n\tstakingKeeper := stakingkeeper.NewKeeper(encCfg.Marshaler, keyStaking, accountKeeper, bankKeeper, paramsKeeper.Subspace(stakingtypes.ModuleName))\n\tstakingKeeper.SetParams(ctx, stakingtypes.DefaultParams())\n\toracleKeeper := oraclekeeper.NewKeeper(encCfg.Marshaler, keyOracle, stakingKeeper, consensusNeeded)\n\n\t// set module accounts\n\terr = bankKeeper.AddCoins(ctx, notBondedPool.GetAddress(), totalSupply)\n\trequire.NoError(t, err)\n\n\taccountKeeper.SetModuleAccount(ctx, bridgeAccount)\n\taccountKeeper.SetModuleAccount(ctx, feeCollectorAcc)\n\taccountKeeper.SetModuleAccount(ctx, bondPool)\n\taccountKeeper.SetModuleAccount(ctx, notBondedPool)\n\n\tethbridgeKeeper := keeper.NewKeeper(encCfg.Marshaler, bankKeeper, oracleKeeper, accountKeeper, keyEthBridge)\n\tCethReceiverAccount, _ := sdk.AccAddressFromBech32(TestCethReceiverAddress)\n\tethbridgeKeeper.SetCethReceiverAccount(ctx, CethReceiverAccount)\n\n\t// Setup 
validators\n\tvalAddrs := make([]sdk.ValAddress, len(validatorAmounts))\n\tfor i, amount := range validatorAmounts {\n\t\tvalPubKey := PKs[i]\n\t\tvalAddr := sdk.ValAddress(valPubKey.Address().Bytes())\n\t\tvalAddrs[i] = valAddr\n\t\tvalTokens := sdk.TokensFromConsensusPower(amount)\n\t\t// test how the validator is set from a purely unbonbed pool\n\t\tvalidator, err := stakingtypes.NewValidator(valAddr, valPubKey, stakingtypes.Description{})\n\t\trequire.NoError(t, err)\n\n\t\tvalidator, _ = validator.AddTokensFromDel(valTokens)\n\t\tstakingKeeper.SetValidator(ctx, validator)\n\t\tstakingKeeper.SetValidatorByPowerIndex(ctx, validator)\n\t\t_, err = stakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx)\n\t\tif err != nil {\n\t\t\tpanic(\"Failed to apply validator set updates\")\n\t\t}\n\t}\n\n\toracleKeeper.SetOracleWhiteList(ctx, valAddrs)\n\n\treturn ctx, ethbridgeKeeper, bankKeeper, accountKeeper, oracleKeeper, encCfg, valAddrs\n}", "func CreateTestKeepers(t *testing.T, consensusNeeded float64, validatorAmounts []int64, extraMaccPerm string) (sdk.Context, Keeper, bank.Keeper, supply.Keeper, auth.AccountKeeper, []sdk.ValAddress) {\n\tPKs := CreateTestPubKeys(500)\n\tkeyStaking := sdk.NewKVStoreKey(stakingtypes.StoreKey)\n\ttkeyStaking := sdk.NewTransientStoreKey(stakingtypes.TStoreKey)\n\tkeyAcc := sdk.NewKVStoreKey(auth.StoreKey)\n\tkeyParams := sdk.NewKVStoreKey(params.StoreKey)\n\ttkeyParams := sdk.NewTransientStoreKey(params.TStoreKey)\n\tkeySupply := sdk.NewKVStoreKey(supply.StoreKey)\n\tkeyOracle := sdk.NewKVStoreKey(types.StoreKey)\n\n\tdb := dbm.NewMemDB()\n\tms := store.NewCommitMultiStore(db)\n\tms.MountStoreWithDB(tkeyStaking, sdk.StoreTypeTransient, nil)\n\tms.MountStoreWithDB(keyStaking, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyAcc, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyParams, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(tkeyParams, sdk.StoreTypeTransient, db)\n\tms.MountStoreWithDB(keySupply, sdk.StoreTypeIAVL, db)\n\tms.MountStoreWithDB(keyOracle, sdk.StoreTypeIAVL, db)\n\terr := ms.LoadLatestVersion()\n\trequire.Nil(t, err)\n\n\tctx := sdk.NewContext(ms, abci.Header{ChainID: \"foochainid\"}, false, nil)\n\tctx = ctx.WithConsensusParams(\n\t\t&abci.ConsensusParams{\n\t\t\tValidator: &abci.ValidatorParams{\n\t\t\t\tPubKeyTypes: []string{tmtypes.ABCIPubKeyTypeEd25519},\n\t\t\t},\n\t\t},\n\t)\n\tctx = ctx.WithLogger(log.NewNopLogger())\n\tcdc := MakeTestCodec()\n\n\tfeeCollectorAcc := supply.NewEmptyModuleAccount(auth.FeeCollectorName)\n\tnotBondedPool := supply.NewEmptyModuleAccount(stakingtypes.NotBondedPoolName, supply.Burner, supply.Staking)\n\tbondPool := supply.NewEmptyModuleAccount(stakingtypes.BondedPoolName, supply.Burner, supply.Staking)\n\n\tblacklistedAddrs := make(map[string]bool)\n\tblacklistedAddrs[feeCollectorAcc.GetAddress().String()] = true\n\tblacklistedAddrs[notBondedPool.GetAddress().String()] = true\n\tblacklistedAddrs[bondPool.GetAddress().String()] = true\n\n\tparamsKeeper := params.NewKeeper(cdc, keyParams, tkeyParams, params.DefaultCodespace)\n\n\taccountKeeper := auth.NewAccountKeeper(\n\t\tcdc, // amino codec\n\t\tkeyAcc, // target store\n\t\tparamsKeeper.Subspace(auth.DefaultParamspace),\n\t\tauth.ProtoBaseAccount, // prototype\n\t)\n\n\tbankKeeper := bank.NewBaseKeeper(\n\t\taccountKeeper,\n\t\tparamsKeeper.Subspace(bank.DefaultParamspace),\n\t\tbank.DefaultCodespace,\n\t\tblacklistedAddrs,\n\t)\n\n\tmaccPerms := map[string][]string{\n\t\tauth.FeeCollectorName: nil,\n\t\tstakingtypes.NotBondedPoolName: {supply.Burner, 
supply.Staking},\n\t\tstakingtypes.BondedPoolName: {supply.Burner, supply.Staking},\n\t}\n\n\tif extraMaccPerm != \"\" {\n\t\tmaccPerms[extraMaccPerm] = []string{supply.Burner, supply.Minter}\n\t}\n\n\tsupplyKeeper := supply.NewKeeper(cdc, keySupply, accountKeeper, bankKeeper, maccPerms)\n\n\tinitTokens := sdk.TokensFromConsensusPower(10000)\n\ttotalSupply := sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, initTokens.MulRaw(int64(100))))\n\n\tsupplyKeeper.SetSupply(ctx, supply.NewSupply(totalSupply))\n\n\tstakingKeeper := staking.NewKeeper(cdc, keyStaking, supplyKeeper, paramsKeeper.Subspace(staking.DefaultParamspace), stakingtypes.DefaultCodespace)\n\tstakingKeeper.SetParams(ctx, stakingtypes.DefaultParams())\n\toracleKeeper := NewKeeper(cdc, keyOracle, stakingKeeper, types.DefaultCodespace, consensusNeeded)\n\n\t// set module accounts\n\terr = notBondedPool.SetCoins(totalSupply)\n\trequire.NoError(t, err)\n\n\tsupplyKeeper.SetModuleAccount(ctx, feeCollectorAcc)\n\tsupplyKeeper.SetModuleAccount(ctx, bondPool)\n\tsupplyKeeper.SetModuleAccount(ctx, notBondedPool)\n\n\t// Setup validators\n\tvalAddrs := make([]sdk.ValAddress, len(validatorAmounts))\n\tfor i, amount := range validatorAmounts {\n\t\tvalPubKey := PKs[i]\n\t\tvalAddr := sdk.ValAddress(valPubKey.Address().Bytes())\n\t\tvalAddrs[i] = valAddr\n\t\tvalTokens := sdk.TokensFromConsensusPower(amount)\n\t\t// test how the validator is set from a purely unbonbed pool\n\t\tvalidator := stakingtypes.NewValidator(valAddr, valPubKey, stakingtypes.Description{})\n\t\tvalidator, _ = validator.AddTokensFromDel(valTokens)\n\t\tstakingKeeper.SetValidator(ctx, validator)\n\t\tstakingKeeper.SetValidatorByPowerIndex(ctx, validator)\n\t\tstakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx)\n\t}\n\n\treturn ctx, oracleKeeper, bankKeeper, supplyKeeper, accountKeeper, valAddrs\n}", "func newKubeBuilder(appMan Manifest) Builder {\n\treturn &KubeBuilder{Manifest: appMan}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewWebhookSpec generates a new WebhookSpec from a job.WebhookSpec
func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec {
    return &WebhookSpec{
        CreatedAt: spec.CreatedAt,
        UpdatedAt: spec.UpdatedAt,
    }
}
[ "func (in *GitHubWebhookSpec) DeepCopy() *GitHubWebhookSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(GitHubWebhookSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewWebhook(log logr.Logger) Webhook {\n\tac := serializer.NewCodecFactory(runtime.NewScheme())\n\td := ac.UniversalDeserializer()\n\treturn Webhook{\n\t\tLog: log,\n\t\tadmissionDecoder: d,\n\t}\n}", "func New(mgr manager.Manager) (*extensionswebhook.Webhook, error) {\n\tlogger.Info(\"Setting up webhook\", \"name\", Name)\n\n\treturn extensionswebhook.New(mgr, extensionswebhook.Args{\n\t\tProvider: calico.Name,\n\t\tName: Name,\n\t\tPath: \"/webhooks/validate\",\n\t\tPredicates: []predicate.Predicate{createCalicoPredicate()},\n\t\tValidators: map[extensionswebhook.Validator][]extensionswebhook.Type{\n\t\t\tNewShootValidator(): {{Obj: &core.Shoot{}}},\n\t\t},\n\t})\n}", "func (hookReconciler *EventHookReconciler) createDeploymentSpec(name string, labels map[string]string, configMapName string, existingDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {\n\n\tpipelineDeployment := hookReconciler.pipelineDeployment\n\thookConfig := hookReconciler.hookConfig\n\n\t// Set the image name\n\timagePullPolicy := corev1.PullIfNotPresent\n\timageName := os.Getenv(\"HOOK_IMAGE\")\n\tif imageName == \"\" {\n\t\tif hookConfig.Image == nil {\n\t\t\timageName = \"algohub/hook-runner:latest\"\n\t\t} else {\n\t\t\tif hookConfig.Image.Tag == \"\" {\n\t\t\t\timageName = fmt.Sprintf(\"%s:latest\", hookConfig.Image.Repository)\n\t\t\t} else {\n\t\t\t\timageName = fmt.Sprintf(\"%s:%s\", hookConfig.Image.Repository, hookConfig.Image.Tag)\n\t\t\t}\n\t\t\tswitch *hookConfig.Image.ImagePullPolicy {\n\t\t\tcase \"Never\":\n\t\t\t\timagePullPolicy = corev1.PullNever\n\t\t\tcase \"PullAlways\":\n\t\t\t\timagePullPolicy = corev1.PullAlways\n\t\t\tcase \"IfNotPresent\":\n\t\t\t\timagePullPolicy = corev1.PullIfNotPresent\n\t\t\tdefault:\n\t\t\t\timagePullPolicy = corev1.PullIfNotPresent\n\t\t\t}\n\t\t}\n\t}\n\n\t// Configure the readiness and liveness\n\thandler := corev1.Handler{\n\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\tScheme: \"HTTP\",\n\t\t\tPath: \"/health\",\n\t\t\tPort: intstr.FromInt(10080),\n\t\t},\n\t}\n\n\tvolumes := []corev1.Volume{}\n\tvolumeMounts := []corev1.VolumeMount{}\n\n\t// Create kafka tls volumes and mounts if tls enabled\n\tkafkaUtil := hookReconciler.kafkaUtil\n\tif hookReconciler.kafkaUtil.TLS != nil {\n\n\t\tkafkaTLSVolumes := []corev1.Volume{\n\t\t\t{\n\t\t\t\tName: \"kafka-ca-certs\",\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: kafkaUtil.TLS.TrustedCertificates[0].SecretName,\n\t\t\t\t\t\tDefaultMode: utils.Int32p(0444),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tvolumes = append(volumes, kafkaTLSVolumes...)\n\n\t\tkafkaTLSMounts := []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"kafka-ca-certs\",\n\t\t\t\tSubPath: kafkaUtil.TLS.TrustedCertificates[0].Certificate,\n\t\t\t\tMountPath: \"/etc/ssl/certs/kafka-ca.crt\",\n\t\t\t\tReadOnly: true,\n\t\t\t},\n\t\t}\n\t\tvolumeMounts = append(volumeMounts, kafkaTLSMounts...)\n\t}\n\n\tif kafkaUtil.Authentication != nil {\n\n\t\tkafkaAuthVolumes := []corev1.Volume{\n\t\t\t{\n\t\t\t\tName: \"kafka-certs\",\n\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\tSecret: &corev1.SecretVolumeSource{\n\t\t\t\t\t\tSecretName: kafkaUtil.Authentication.CertificateAndKey.SecretName,\n\t\t\t\t\t\tDefaultMode: utils.Int32p(0444),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tvolumes = 
append(volumes, kafkaAuthVolumes...)\n\n\t\tkafkaAuthMounts := []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"kafka-certs\",\n\t\t\t\tSubPath: kafkaUtil.Authentication.CertificateAndKey.Certificate,\n\t\t\t\tMountPath: \"/etc/ssl/certs/kafka-user.crt\",\n\t\t\t\tReadOnly: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"kafka-certs\",\n\t\t\t\tSubPath: kafkaUtil.Authentication.CertificateAndKey.Key,\n\t\t\t\tMountPath: \"/etc/ssl/certs/kafka-user.key\",\n\t\t\t\tReadOnly: true,\n\t\t\t},\n\t\t}\n\t\tvolumeMounts = append(volumeMounts, kafkaAuthMounts...)\n\t}\n\n\tconfigMapVolume := corev1.Volume{\n\t\tName: \"hook-config-volume\",\n\t\tVolumeSource: corev1.VolumeSource{\n\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\tLocalObjectReference: v1.LocalObjectReference{Name: configMapName},\n\t\t\t\tDefaultMode: utils.Int32p(0444),\n\t\t\t},\n\t\t},\n\t}\n\tvolumes = append(volumes, configMapVolume)\n\n\t// Add config mount\n\tconfigVolumeMount := corev1.VolumeMount{\n\t\tName: \"hook-config-volume\",\n\t\tSubPath: \"hook-config\",\n\t\tMountPath: \"/hook-config/hook-config.json\",\n\t}\n\tvolumeMounts = append(volumeMounts, configVolumeMount)\n\n\tvar containers []corev1.Container\n\n\thookCommand := []string{\"/hook-runner/hook-runner\"}\n\thookEnvVars := hookReconciler.createEnvVars(pipelineDeployment, hookConfig)\n\n\treadinessProbe := &corev1.Probe{\n\t\tHandler: handler,\n\t\tInitialDelaySeconds: 10,\n\t\tTimeoutSeconds: 10,\n\t\tPeriodSeconds: 20,\n\t\tSuccessThreshold: 1,\n\t\tFailureThreshold: 3,\n\t}\n\n\tlivenessProbe := &corev1.Probe{\n\t\tHandler: handler,\n\t\tInitialDelaySeconds: 10,\n\t\tTimeoutSeconds: 10,\n\t\tPeriodSeconds: 20,\n\t\tSuccessThreshold: 1,\n\t\tFailureThreshold: 3,\n\t}\n\n\tkubeUtil := utils.NewKubeUtil(hookReconciler.manager, hookReconciler.request)\n\tresources, resourceErr := kubeUtil.CreateResourceReqs(hookConfig.Resources)\n\n\tif resourceErr != nil {\n\t\treturn nil, resourceErr\n\t}\n\n\tconfigArgs := []string{\"--config=/hook-config/hook-config.json\"}\n\n\t// Hook container\n\thookContainer := corev1.Container{\n\t\tName: name,\n\t\tImage: imageName,\n\t\tCommand: hookCommand,\n\t\tArgs: configArgs,\n\t\tEnv: hookEnvVars,\n\t\tResources: *resources,\n\t\tImagePullPolicy: imagePullPolicy,\n\t\tLivenessProbe: livenessProbe,\n\t\tReadinessProbe: readinessProbe,\n\t\tTerminationMessagePath: \"/dev/termination-log\",\n\t\tTerminationMessagePolicy: \"File\",\n\t}\n\tcontainers = append(containers, hookContainer)\n\n\t// nodeSelector := createSelector(request.Constraints)\n\n\t// If this is an update, need to set the existing deployment name\n\tvar nameMeta metav1.ObjectMeta\n\tif existingDeployment != nil {\n\t\tnameMeta = metav1.ObjectMeta{\n\t\t\tNamespace: pipelineDeployment.Spec.DeploymentNamespace,\n\t\t\tName: existingDeployment.Name,\n\t\t\tLabels: labels,\n\t\t\t// Annotations: annotations,\n\t\t}\n\t} else {\n\t\tnameMeta = metav1.ObjectMeta{\n\t\t\tNamespace: pipelineDeployment.Spec.DeploymentNamespace,\n\t\t\tGenerateName: fmt.Sprintf(\"%s-\", name),\n\t\t\tLabels: labels,\n\t\t\t// Annotations: annotations,\n\t\t}\n\t}\n\n\t// annotations := buildAnnotations(request)\n\tdeploymentSpec := &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Deployment\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: nameMeta,\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tReplicas: &hookConfig.Replicas,\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: 
appsv1.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\t\t\tMaxUnavailable: &intstr.IntOrString{\n\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\tIntVal: int32(0),\n\t\t\t\t\t},\n\t\t\t\t\tMaxSurge: &intstr.IntOrString{\n\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\tIntVal: int32(1),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRevisionHistoryLimit: utils.Int32p(10),\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: nameMeta,\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t// SecurityContext: &corev1.PodSecurityContext{\n\t\t\t\t\t//\tFSGroup: int64p(1431),\n\t\t\t\t\t// },\n\t\t\t\t\t// NodeSelector: nodeSelector,\n\t\t\t\t\tContainers: containers,\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t\tDNSPolicy: corev1.DNSClusterFirst,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// if err := UpdateSecrets(request, deploymentSpec, existingSecrets); err != nil {\n\t// \treturn nil, err\n\t// }\n\n\treturn deploymentSpec, nil\n\n}", "func NewWebhook(ctx *pulumi.Context,\n\tname string, args *WebhookArgs, opts ...pulumi.ResourceOption) (*Webhook, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Name == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Name'\")\n\t}\n\tif args.Url == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Url'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Webhook\n\terr := ctx.RegisterResource(\"datadog:index/webhook:Webhook\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func WebhookNewTrack(w http.ResponseWriter, r *http.Request) {\n\n\t//Decode incoming url\n\tvar hookStruct webhookStruct\n\thookStruct.WebhookURL = \"\"\n\n\tdecoder := json.NewDecoder(r.Body)\n\terr := decoder.Decode(&hookStruct)\n\tif err != nil {\n\t\t//Malformed content body.\n\t\thttp.Error(w, \"Malformed content body\", http.StatusBadRequest)\n\n\t\treturn //Stop whatever we are doing..\n\t}\n\n\tif hookStruct.WebhookURL == \"\" {\n\t\t//Malformed content body.\n\t\thttp.Error(w, \"Malformed content body\", http.StatusBadRequest)\n\n\t\treturn //Stop whatever we are doing..\n\t}\n\n\tif hookStruct.MinTriggerValue < 1 {\n\t\thookStruct.MinTriggerValue = 1\n\t}\n\n\thookStruct.WebhookID = lastWebhookID\n\tinsertWebhook(&hookStruct, &Credentials)\n\n\t//Add ID to array for used ids\n\twebhookID = append(webhookID, lastWebhookID)\n\n\tvar newID = strconv.Itoa(lastWebhookID)\n\n\t//Remember to count up used ids\n\tlastWebhookID++\n\t//Specify content type\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\t//Return the struct as a json object.\n\terr = json.NewEncoder(w).Encode(newID)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func GenJobSpec(jobID, image string, cf *ConfigSetting, entryPoint []string, command []string, labels map[string]string, webhookSetting *WebhookSetting) *batchv1.Job {\n\t// TODO: retry should be changeable ?\n\tbackoffLimit := int32(0)\n\tttlSecondsAfterFinished := int32(3600)\n\tpodSpec := apiv1.PodSpec{\n\t\tContainers: []apiv1.Container{\n\t\t\t{\n\t\t\t\tName: \"smile-job\",\n\t\t\t\tImage: image,\n\t\t\t\tCommand: entryPoint,\n\t\t\t\tArgs: command,\n\t\t\t\t// Resources: apiv1.ResourceRequirements{\n\t\t\t\t// \tLimits: apiv1.ResourceList{\n\t\t\t\t// \t\t\"cpu\": resource.MustParse(cpuLimit),\n\t\t\t\t// \t\t\"memory\": resource.MustParse(memLimit),\n\t\t\t\t// 
\t},\n\t\t\t\t// \tRequests: apiv1.ResourceList{\n\t\t\t\t// \t\t\"cpu\": resource.MustParse(cpuReq),\n\t\t\t\t// \t\t\"memory\": resource.MustParse(memReq),\n\t\t\t\t// \t},\n\t\t\t\t// },\n\t\t\t\tEnv: []apiv1.EnvVar{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"MY_POD_NAMESPACE\",\n\t\t\t\t\t\tValueFrom: &apiv1.EnvVarSource{\n\t\t\t\t\t\t\tFieldRef: &apiv1.ObjectFieldSelector{\n\t\t\t\t\t\t\t\tFieldPath: \"metadata.namespace\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tRestartPolicy: apiv1.RestartPolicyNever,\n\t}\n\n\t//case for need injection configmap\n\tif cf != nil {\n\t\t//declare a volume for configmap\n\t\tpodSpec.Volumes = []apiv1.Volume{\n\t\t\t{\n\t\t\t\tName: \"cofig-vol\",\n\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\tConfigMap: &apiv1.ConfigMapVolumeSource{\n\t\t\t\t\t\tLocalObjectReference: apiv1.LocalObjectReference{\n\t\t\t\t\t\t\tName: cf.ConfigMapRef,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t//volumn mount\n\t\tpodSpec.Containers[0].VolumeMounts = []apiv1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"cofig-vol\",\n\t\t\t\tMountPath: cf.ConfigDir,\n\t\t\t},\n\t\t}\n\t\t//pass a env var for app know configmap dir path\n\t\tpodSpec.Containers[0].Env = append(podSpec.Containers[0].Env, apiv1.EnvVar{\n\t\t\tName: \"CONFIGDIR\",\n\t\t\tValue: cf.ConfigDir,\n\t\t})\n\t}\n\n\tobjectMeta := metav1.ObjectMeta{\n\t\tName: jobID,\n\t}\n\n\tif webhookSetting != nil {\n\t\t//injection into annotations\n\t\tobjectMeta.Annotations = decodeWebhookConfig(webhookSetting)\n\t\tlabels[\"webhook-enable\"] = \"true\"\n\t} else {\n\t\tlabels[\"webhook-enable\"] = \"false\"\n\t}\n\n\treturn &batchv1.Job{\n\t\tObjectMeta: objectMeta,\n\t\tSpec: batchv1.JobSpec{\n\t\t\tBackoffLimit: &backoffLimit,\n\t\t\t//FEATURE STATE: Kubernetes v1.12 [alpha]\n\t\t\tTTLSecondsAfterFinished: &ttlSecondsAfterFinished,\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: podSpec,\n\t\t\t},\n\t\t},\n\t}\n}", "func (d hook) toSpec() *specs.Hook {\n\ts := specs.Hook{\n\t\tHookName: d.Lifecycle,\n\t\tPath: d.Path,\n\t\tArgs: d.Args,\n\t}\n\n\treturn &s\n}", "func MakeHookRequest(t *testing.T, fixture, eventType string, changes ...fixtureFunc) *http.Request {\n\tbody := ReadJSONFixture(t, fixture)\n\tfor _, c := range changes {\n\t\tc(body)\n\t}\n\n\tserialisedBody := serialiseToJSON(t, body)\n\tmac := hmac.New(sha1.New, []byte(secret))\n\t_, err := mac.Write(serialisedBody.Bytes())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsig := hex.EncodeToString(mac.Sum(nil))\n\treq := httptest.NewRequest(\"POST\", \"/\", serialisedBody)\n\treq.Header.Add(\"X-GitHub-Delivery\", \"72d3162e-cc78-11e3-81ab-4c9367dc0958\")\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"X-GitHub-Event\", eventType)\n\treq.Header.Add(\"X-Hub-Signature\", fmt.Sprintf(\"sha1=%s\", sig))\n\treturn req\n}", "func newValidatingIsReadyWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32, namespace string) admissionregistrationv1.ValidatingWebhook {\n\tsideEffectsNone := admissionregistrationv1.SideEffectClassNone\n\tfailOpen := admissionregistrationv1.Ignore\n\treturn admissionregistrationv1.ValidatingWebhook{\n\t\tName: \"validating-is-webhook-configuration-ready.k8s.io\",\n\t\tRules: []admissionregistrationv1.RuleWithOperations{{\n\t\t\tOperations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create},\n\t\t\tRule: 
admissionregistrationv1.Rule{\n\t\t\t\tAPIGroups: []string{\"\"},\n\t\t\t\tAPIVersions: []string{\"v1\"},\n\t\t\t\tResources: []string{\"configmaps\"},\n\t\t\t},\n\t\t}},\n\t\tClientConfig: admissionregistrationv1.WebhookClientConfig{\n\t\t\tService: &admissionregistrationv1.ServiceReference{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tName: serviceName,\n\t\t\t\tPath: strPtr(\"/always-deny\"),\n\t\t\t\tPort: pointer.Int32(servicePort),\n\t\t\t},\n\t\t\tCABundle: certCtx.signingCert,\n\t\t},\n\t\t// network failures while the service network routing is being set up should be ignored by the marker\n\t\tFailurePolicy: &failOpen,\n\t\tSideEffects: &sideEffectsNone,\n\t\tAdmissionReviewVersions: []string{\"v1\", \"v1beta1\"},\n\t\t// Scope the webhook to just the markers namespace\n\t\tNamespaceSelector: &metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{uniqueName + \"-markers\": \"true\"},\n\t\t},\n\t\t// appease createValidatingWebhookConfiguration isolation requirements\n\t\tObjectSelector: &metav1.LabelSelector{\n\t\t\tMatchLabels: map[string]string{uniqueName: \"true\"},\n\t\t},\n\t}\n}", "func (o *CreateGitWebhookUsingPOSTParams) SetGitWebhookSpec(gitWebhookSpec models.GitWebhookSpec) {\n\to.GitWebhookSpec = gitWebhookSpec\n}", "func NewWebhook(link string) WebhookConfig {\n\tu, _ := url.Parse(link)\n\n\treturn WebhookConfig{\n\t\tURL: u,\n\t\tClear: false,\n\t}\n}", "func NewWebhook(url string, filterFnString string, timeout uint64) (*Webhook, error) {\n\n\tvar err error\n\n\tif url == \"\" {\n\t\terr = errors.New(\"url parameter must be defined for webhook events.\")\n\t\treturn nil, err\n\t}\n\n\twh := &Webhook{\n\t\turl: url,\n\t}\n\tif filterFnString != \"\" {\n\t\twh.filter = NewJSEventFunction(filterFnString)\n\t}\n\n\tif timeout != 0 {\n\t\twh.timeout = time.Duration(timeout) * time.Second\n\t} else {\n\t\twh.timeout = time.Duration(kDefaultWebhookTimeout) * time.Second\n\t}\n\n\treturn wh, err\n}", "func New(mgr manager.Manager, args Args) (*Webhook, error) {\n\tlogger := log.Log.WithName(args.Name).WithValues(\"provider\", args.Provider)\n\n\t// Create handler\n\tbuilder := NewBuilder(mgr, logger)\n\n\tfor val, objs := range args.Validators {\n\t\tbuilder.WithValidator(val, objs...)\n\t}\n\n\tfor mut, objs := range args.Mutators {\n\t\tbuilder.WithMutator(mut, objs...)\n\t}\n\n\tbuilder.WithPredicates(args.Predicates...)\n\n\thandler, err := builder.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create webhook\n\tlogger.Info(\"Creating webhook\")\n\n\treturn &Webhook{\n\t\tPath: args.Path,\n\t\tWebhook: &admission.Webhook{Handler: handler},\n\t}, nil\n}", "func NewGenericWebhook(kubeConfigFile string, groupVersions []unversioned.GroupVersion) (*GenericWebhook, error) {\n\tfor _, groupVersion := range groupVersions {\n\t\tif !registered.IsEnabledVersion(groupVersion) {\n\t\t\treturn nil, fmt.Errorf(\"webhook plugin requires enabling extension resource: %s\", groupVersion)\n\t\t}\n\t}\n\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tloadingRules.ExplicitPath = kubeConfigFile\n\tloader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})\n\n\tclientConfig, err := loader.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcodec := api.Codecs.LegacyCodec(groupVersions...)\n\tclientConfig.ContentConfig.NegotiatedSerializer = runtimeserializer.NegotiatedSerializerWrapper(\n\t\truntime.SerializerInfo{Serializer: codec},\n\t\truntime.StreamSerializerInfo{},\n\t)\n\n\trestClient, err := 
restclient.UnversionedRESTClientFor(clientConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO(ericchiang): Can we ensure remote service is reachable?\n\n\treturn &GenericWebhook{restClient}, nil\n}", "func NewWebHookSink(uri *url.URL) (*WebHookSink, error) {\n\ts := &WebHookSink{\n\t\t// default http method\n\t\tmethod: http.MethodGet,\n\t\tbodyTemplate: defaultBodyTemplate,\n\t\tfilters: make(map[string]filters.Filter),\n\t}\n\n\tif len(uri.Host) > 0 {\n\t\ts.endpoint = uri.String()\n\t} else {\n\t\tklog.Errorf(\"uri host's length is 0 and pls check your uri: %v\", uri)\n\t\treturn nil, fmt.Errorf(\"uri host is not valid.url: %v\", uri)\n\t}\n\n\topts := uri.Query()\n\n\tif len(opts[\"method\"]) >= 1 {\n\t\ts.method = opts[\"method\"][0]\n\t}\n\n\t// set header of webHook\n\ts.headerMap = parseHeaders(opts[\"header\"])\n\n\tlevel := Warning\n\tif len(opts[\"level\"]) >= 1 {\n\t\tlevel = opts[\"level\"][0]\n\t\ts.filters[\"LevelFilter\"] = filters.NewGenericFilter(\"Type\", getLevels(level), false)\n\t}\n\n\tif len(opts[\"namespaces\"]) >= 1 {\n\t\t// namespace filter doesn't support regexp\n\t\tnamespaces := filters.GetValues(opts[\"namespaces\"])\n\t\ts.filters[\"NamespacesFilter\"] = filters.NewGenericFilter(\"Namespace\", namespaces, false)\n\t}\n\n\tif len(opts[\"kinds\"]) >= 1 {\n\t\t// such as node,pod,component and so on\n\t\t// kinds:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#lists-and-simple-kinds\n\t\tkinds := filters.GetValues(opts[\"kinds\"])\n\t\ts.filters[\"KindsFilter\"] = filters.NewGenericFilter(\"Kind\", kinds, false)\n\t}\n\n\tif len(opts[\"reason\"]) >= 1 {\n\t\t// reason filter support regexp.\n\t\treasons := filters.GetValues(opts[\"reason\"])\n\t\ts.filters[\"ReasonsFilter\"] = filters.NewGenericFilter(\"Reason\", reasons, true)\n\t}\n\n\tif len(opts[\"custom_body_configmap\"]) >= 1 {\n\t\ts.bodyConfigMapName = opts[\"custom_body_configmap\"][0]\n\n\t\tif len(opts[\"custom_body_configmap_namespace\"]) >= 1 {\n\t\t\ts.bodyConfigMapNamespace = opts[\"custom_body_configmap_namespace\"][0]\n\t\t} else {\n\t\t\ts.bodyConfigMapNamespace = \"default\"\n\t\t}\n\n\t\tclient, err := kubernetes.GetKubernetesClient(nil)\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"Failed to get kubernetes client and use default bodyTemplate instead\")\n\t\t\ts.bodyTemplate = defaultBodyTemplate\n\t\t\treturn s, nil\n\t\t}\n\t\tconfigmap, err := client.CoreV1().ConfigMaps(s.bodyConfigMapNamespace).Get(s.bodyConfigMapName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tklog.Warningf(\"Failed to get configMap %s in namespace %s and use default bodyTemplate instead,because of %v\", s.bodyConfigMapName, s.bodyConfigMapNamespace, err)\n\t\t\ts.bodyTemplate = defaultBodyTemplate\n\t\t\treturn s, nil\n\t\t}\n\t\tif content, ok := configmap.Data[\"content\"]; !ok {\n\t\t\tklog.Warningf(\"Failed to get configMap content and use default bodyTemplate instead,because of %v\", err)\n\t\t\ts.bodyTemplate = defaultBodyTemplate\n\t\t\treturn s, nil\n\t\t} else {\n\t\t\ts.bodyTemplate = content\n\t\t}\n\t}\n\n\treturn s, nil\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, 
fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewCmdWebhook(name, fullName string) *cobra.Command {\n\tcreateCmd := newCmdCreate(createRecommendedCommandName, odoutil.GetFullName(fullName, createRecommendedCommandName))\n\tdeleteCmd := newCmdDelete(deleteRecommendedCommandName, odoutil.GetFullName(fullName, deleteRecommendedCommandName))\n\tlistCmd := newCmdList(listRecommendedCommandName, odoutil.GetFullName(fullName, listRecommendedCommandName))\n\n\tvar webhookCmd = &cobra.Command{\n\t\tUse: name,\n\t\tShort: \"Manage Git repository webhooks\",\n\t\tLong: \"Add/Delete/list Git repository webhooks that trigger CI/CD pipeline runs.\",\n\t\tExample: fmt.Sprintf(\"%s\\n%s\\n%s\\n%s\\n\\n See sub-commands individually for more examples\",\n\t\t\tfullName,\n\t\t\tcreateRecommendedCommandName,\n\t\t\tdeleteRecommendedCommandName,\n\t\t\tlistRecommendedCommandName),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t},\n\t}\n\n\twebhookCmd.AddCommand(createCmd)\n\twebhookCmd.AddCommand(deleteCmd)\n\twebhookCmd.AddCommand(listCmd)\n\n\twebhookCmd.Annotations = map[string]string{\"command\": \"main\"}\n\twebhookCmd.SetUsageTemplate(odoutil.CmdUsageTemplate)\n\treturn webhookCmd\n}", "func NewWebhook(url string, file interface{}) *SetWebhookParameters {\n\treturn &SetWebhookParameters{\n\t\tURL: url,\n\t\tCertificate: file,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewCronSpec generates a new CronSpec from a job.CronSpec
func NewCronSpec(spec *job.CronSpec) *CronSpec {
	return &CronSpec{
		CronSchedule: spec.CronSchedule,
		CreatedAt:    spec.CreatedAt,
		UpdatedAt:    spec.UpdatedAt,
	}
}
[ "func NewCronFromJobSpec(\n\tjobSpec job.Job,\n\tpipelineRunner pipeline.Runner,\n) (*Cron, error) {\n\n\tcronSpec := jobSpec.CronSpec\n\tspec := jobSpec.PipelineSpec\n\n\tcronLogger := logger.CreateLogger(\n\t\tlogger.Default.With(\n\t\t\t\"jobID\", jobSpec.ID,\n\t\t\t\"schedule\", cronSpec.CronSchedule,\n\t\t),\n\t)\n\n\treturn &Cron{\n\t\tchDone: make(chan struct{}),\n\t\tchStop: make(chan struct{}),\n\t\tcronRunner: cronParser.New(),\n\t\tjobID: jobSpec.ID,\n\t\tlogger: cronLogger,\n\t\tpipelineRunner: pipelineRunner,\n\t\tpipelineSpec: *spec,\n\t\tSchedule: cronSpec.CronSchedule,\n\t}, nil\n}", "func (w *Worker) ByCronSpec(spec string) *Worker {\n\tw.schedule = ByCronSchedule(spec)\n\treturn w\n}", "func (in *CronSpec) DeepCopy() *CronSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CronSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func createCronJobConfig(module *protos.Module) *v1beta1.CronJob {\n\tvolumes, volumeMounts := makeVolumes(module)\n\tcontainers := makeContainers(module, volumeMounts)\n\tm := module.GetSpec()\n\n\tsuccessfulJobHistoryLimit := int32(1)\n\tfailedJobsHistoryLimit := int32(1)\n\treturn &v1beta1.CronJob{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: module.Name,\n\t\t},\n\t\tSpec: v1beta1.CronJobSpec{\n\t\t\tSchedule: m.Schedule,\n\t\t\tJobTemplate: v1beta1.JobTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"name\": module.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: batchv1.JobSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tContainers: containers,\n\t\t\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\t\t\tRestartPolicy: \"Never\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSuccessfulJobsHistoryLimit: &successfulJobHistoryLimit,\n\t\t\tFailedJobsHistoryLimit: &failedJobsHistoryLimit,\n\t\t},\n\t}\n}", "func NewCronWorkflowSpec() *cronworkflowv1.CronWorkflowSpec {\n\treturn &cronworkflowv1.CronWorkflowSpec{\n\t\tSchedule: \"* * * * *\",\n\t\tConcurrencyPolicy: batchv2alpha1.AllowConcurrent,\n\t\tWorkflowTemplate: NewWorkflowTemplateSpec(),\n\t}\n}", "func newCronJobForCR(cr *v1alpha1.Copybird) *v1beta1.CronJob {\n\tlabels := map[string]string{\n\t\t\"app\": cr.Name,\n\t}\n\treturn &v1beta1.CronJob{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: cr.Name + \"-cronjob\",\n\t\t\tNamespace: cr.Namespace,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: v1beta1.CronJobSpec{\n\t\t\tSchedule: cr.Spec.Cron,\n\t\t\tJobTemplate: v1beta1.JobTemplateSpec{\n\t\t\t\tSpec: batchv1.JobSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: cr.Name + \"-copybird\",\n\t\t\t\t\t\t\tNamespace: cr.Namespace,\n\t\t\t\t\t\t\tLabels: labels,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: cr.Name,\n\t\t\t\t\t\t\t\t\tImage: \"copybird/copybird\",\n\t\t\t\t\t\t\t\t\tCommand: []string{},\n\t\t\t\t\t\t\t\t\tArgs: []string{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRestartPolicy: \"OnFailure\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (in *CronJobSpec) DeepCopy() *CronJobSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CronJobSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newCron(schedule string) (chan bool, *cron.Cron) {\n\tchannel := make(chan bool)\n\tcj := cron.New()\n\tcj.AddFunc(schedule, func() { cronTriggered(channel) 
})\n\tglog.Info(\"db backup schedule: \" + schedule)\n\treturn channel, cj\n}", "func (*CronSpec) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_job_proto_rawDescGZIP(), []int{18}\n}", "func new(t time.Duration, sleepToBeginningOfMinute ...bool) *Crontab {\n\t// fmt.Printf(\"The configuration parameters: %v\\n\", sleepToBeginningOfMinute)\n\tif len(sleepToBeginningOfMinute) > 0 && sleepToBeginningOfMinute[0] {\n\t\t// wait until the minute 00 seconds 000 milliseconds\n\t\t// to make the trigger starting at 00 seconds 000 milliseconds\n\t\tnow := time.Now()\n\t\ttime.Sleep(time.Duration(60-now.Second())*time.Second - now.Sub(now.Truncate(time.Second)))\n\t}\n\tc := &Crontab{\n\t\tticker: time.NewTicker(t),\n\t\tstatsChan: make(chan ExecStats),\n\t}\n\n\tgo func() {\n\t\t// check straight away which jobs to run (according to their crontab)\n\t\tc.runScheduled(time.Now())\n\t\tfor t := range c.ticker.C {\n\t\t\tc.runScheduled(t)\n\t\t}\n\t}()\n\n\treturn c\n}", "func NewCron() helmify.Processor {\n\treturn &cron{}\n}", "func NewCron(runner func(*schedule.Schedule)) *Cron {\n\treturn &Cron{cron.New(), runner}\n}", "func CreateOrUpdate(ctx context.Context, c client.Client, cj *batchv1.CronJob, equal EqualityFunc, mutate MutateFunc) error {\n\tcurrent := &batchv1.CronJob{}\n\tkey := client.ObjectKey{Name: cj.Name, Namespace: cj.Namespace}\n\terr := c.Get(ctx, key, current)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\terr = c.Create(ctx, cj)\n\n\t\t\tif err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn kverrors.Wrap(err, \"failed to create cronjob\",\n\t\t\t\t\"name\", cj.Name,\n\t\t\t\t\"namespace\", cj.Namespace,\n\t\t\t)\n\t\t}\n\n\t\treturn kverrors.Wrap(err, \"failed to get cronjob\",\n\t\t\t\"name\", cj.Name,\n\t\t\t\"namespace\", cj.Namespace,\n\t\t)\n\t}\n\n\tif !equal(current, cj) {\n\t\terr := retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tif err := c.Get(ctx, key, current); err != nil {\n\t\t\t\treturn kverrors.Wrap(err, \"failed to get cronjob\",\n\t\t\t\t\t\"name\", cj.Name,\n\t\t\t\t\t\"namespace\", cj.Namespace,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tmutate(current, cj)\n\t\t\tif err := c.Update(ctx, current); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn kverrors.Wrap(err, \"failed to update cronjob\",\n\t\t\t\t\"name\", cj.Name,\n\t\t\t\t\"namespace\", cj.Namespace,\n\t\t\t)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn nil\n}", "func makeCron(rules []*Rule, tm *tskMaster) (*cron.Cron, error) {\n\tc := cron.New(cron.WithSeconds())\n\tfor _, rule := range rules {\n\t\tif rule.CronCheck == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tjob := newJob(rule, tm)\n\t\t_, err := c.AddJob(rule.CronCheck, job)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid cron: '%v'\", err.Error())\n\t\t}\n\t}\n\tc.Location()\n\tc.Start()\n\n\treturn c, nil\n}", "func New() *Cron {\n\treturn &Cron{\n\t\tcontrol: make(chan *Entry),\n\t\tstop: make(chan struct{}),\n\t\tentries: make(map[string]*Entry),\n\t\tjtree: RbtreeInit(CronInsert),\n\t\t//lock\n\t\trunning: false,\n\t}\n}", "func (s *deploymentServer) createCronjob(ctx context.Context, manifest []byte, env []EnvVar, initVariables []EnvVar) error {\n\tdecoder := k8sYaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 1000)\n\n\tj := &apibatch.CronJob{}\n\n\tif err := decoder.Decode(&j); err != nil {\n\t\treturn err\n\t}\n\n\tif len(env) > 0 {\n\t\tcontainers := j.Spec.JobTemplate.Spec.Template.Spec.Containers\n\t\tapplyEnvironment(containers, 
env)\n\t}\n\n\tinitContainers := j.Spec.JobTemplate.Spec.Template.Spec.InitContainers\n\tif len(initContainers) > 0 {\n\t\tfmt.Println(\"job \" + j.Namespace + \".\" + j.Name + \" has initContainers\")\n\t\tapplyEnvironment(initContainers, initVariables)\n\t} else {\n\t\tfmt.Println(\"job \" + j.Namespace + \".\" + j.Name + \" has not initContainers; bug in config\")\n\t}\n\n\tbatchAPI := s.clientset.BatchV1beta1()\n\tapiJobs := batchAPI.CronJobs(j.Namespace)\n\n\tif _, err := apiJobs.Create(ctx, j, metav1.CreateOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"job create error '%s'\", err.Error())\n\t}\n\treturn nil\n}", "func New(cronExpr, timeZone string, durationMin uint64) (cr *CronRange, err error) {\n\t// Precondition check\n\tif durationMin == 0 {\n\t\terr = errZeroDuration\n\t\treturn\n\t}\n\n\t// Clean up string parameters\n\tcronExpr, timeZone = strings.TrimSpace(cronExpr), strings.TrimSpace(timeZone)\n\n\t// Append time zone into cron spec if necessary\n\tcronSpec := cronExpr\n\tif strings.ToLower(timeZone) == \"local\" {\n\t\ttimeZone = \"\"\n\t} else if len(timeZone) > 0 {\n\t\tcronSpec = fmt.Sprintf(\"CRON_TZ=%s %s\", timeZone, cronExpr)\n\t}\n\n\t// Validate & retrieve crontab schedule\n\tvar schedule cron.Schedule\n\tif schedule, err = cronParser.Parse(cronSpec); err != nil {\n\t\treturn\n\t}\n\n\tcr = &CronRange{\n\t\tcronExpression: cronExpr,\n\t\ttimeZone: timeZone,\n\t\tduration: time.Minute * time.Duration(durationMin),\n\t\tschedule: schedule,\n\t}\n\treturn\n}", "func New() *Cron {\n\treturn NewWithLocation(time.Now().Location())\n}", "func New() Cron {\n\treturn &cron{cr.New()}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewBlockhashStoreSpec creates a new BlockhashStoreSpec for the given parameters.
func NewBlockhashStoreSpec(spec *job.BlockhashStoreSpec) *BlockhashStoreSpec {
	return &BlockhashStoreSpec{
		CoordinatorV1Address:           spec.CoordinatorV1Address,
		CoordinatorV2Address:           spec.CoordinatorV2Address,
		CoordinatorV2PlusAddress:       spec.CoordinatorV2PlusAddress,
		WaitBlocks:                     spec.WaitBlocks,
		LookbackBlocks:                 spec.LookbackBlocks,
		BlockhashStoreAddress:          spec.BlockhashStoreAddress,
		TrustedBlockhashStoreAddress:   spec.TrustedBlockhashStoreAddress,
		TrustedBlockhashStoreBatchSize: spec.TrustedBlockhashStoreBatchSize,
		PollPeriod:                     spec.PollPeriod,
		RunTimeout:                     spec.RunTimeout,
		EVMChainID:                     spec.EVMChainID,
		FromAddresses:                  spec.FromAddresses,
	}
}
[ "func newBlockStore(id string, conf *Conf, indexConfig *IndexConfig,\n\tdbHandle *leveldbhelper.DBHandle, stats *stats) (*BlockStore, error) {\n\tfileMgr, err := newBlockfileMgr(id, conf, indexConfig, dbHandle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create ledgerStats and initialize blockchain_height stat\n\tledgerStats := stats.ledgerStats(id)\n\tinfo := fileMgr.getBlockchainInfo()\n\tledgerStats.updateBlockchainHeight(info.Height)\n\n\treturn &BlockStore{id, conf, fileMgr, ledgerStats}, nil\n}", "func newMockBlockHeaderStore() *mockBlockHeaderStore {\n\treturn &mockBlockHeaderStore{\n\t\theaders: make(map[chainhash.Hash]wire.BlockHeader),\n\t\theights: make(map[uint32]wire.BlockHeader),\n\t}\n}", "func newFsBlockStore(id string, conf *Conf, indexConfig *blkstorage.IndexConfig,\n\tdbHandle *leveldbhelper.DBHandle, stats *stats) *fsBlockStore {\n\tfileMgr := newBlockfileMgr(id, conf, indexConfig, dbHandle)\n\n\t// create ledgerStats and initialize blockchain_height stat\n\tledgerStats := stats.ledgerStats(id)\n\tinfo := fileMgr.getBlockchainInfo()\n\tledgerStats.updateBlockchainHeight(info.Height)\n\n\treturn &fsBlockStore{id, conf, fileMgr, ledgerStats}\n}", "func (p *CDBBlockstoreProvider) createNonCommitterBlockStore(ledgerid, blockStoreDBName, txnStoreDBName string, opts ...option) (api.BlockStore, error) {\n\t//create new block store db\n\tblockStoreDB, err := p.openCouchDB(blockStoreDBName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//check if indexes exists\n\tindexExists, err := blockStoreDB.IndexDesignDocExistsWithRetry(blockHashIndexDoc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !indexExists {\n\t\treturn nil, errors.Errorf(\"DB index not found: [%s]\", blockStoreDBName)\n\t}\n\n\t//create new txn store db\n\ttxnStoreDB, err := p.openCouchDB(txnStoreDBName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newCDBBlockStore(blockStoreDB, txnStoreDB, ledgerid, opts...), nil\n}", "func createCommitterBlockStore(couchInstance *couchdb.CouchInstance, ledgerid, blockStoreDBName, txnStoreDBName string, opts ...option) (api.BlockStore, error) {\n\tblockStoreDB, err := couchdb.CreateCouchDatabase(couchInstance, blockStoreDBName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttxnStoreDB, err := couchdb.CreateCouchDatabase(couchInstance, txnStoreDBName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = createBlockStoreIndices(blockStoreDB)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newCDBBlockStore(blockStoreDB, txnStoreDB, ledgerid, opts...), nil\n}", "func newChainStore(r repo.Repo, genTS *types.TipSet) *CborBlockStore {\n\ttempBlock := r.Datastore()\n\tcborStore := cbor.NewCborStore(tempBlock)\n\treturn &CborBlockStore{\n\t\tStore: chain.NewStore(r.ChainDatastore(), tempBlock, genTS.At(0).Cid(), chain.NewMockCirculatingSupplyCalculator()),\n\t\tcborStore: cborStore,\n\t}\n}", "func New(dbContext model.DBReader, cacheSize int) (model.BlockStore, error) {\n\tblockStore := &blockStore{\n\t\tstaging: make(map[externalapi.DomainHash]*externalapi.DomainBlock),\n\t\ttoDelete: make(map[externalapi.DomainHash]struct{}),\n\t\tcache: lrucache.New(cacheSize),\n\t}\n\n\terr := blockStore.initializeCount(dbContext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn blockStore, nil\n}", "func (in *SecretStoreSpec) DeepCopy() *SecretStoreSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretStoreSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newBlockStorage(c *Config, b *Block) *blockStorage {\n\treturn 
&blockStorage{\n\t\tc: c,\n\t\tblock: b,\n\t\tsigs: make(map[int][]byte),\n\t\tpub: share.NewPubPoly(G2, G2.Point().Base(), c.Public),\n\t}\n}", "func newBlockstoreManager(bs bstore.Blockstore, workerCount int) *blockstoreManager {\n\treturn &blockstoreManager{\n\t\tbs: bs,\n\t\tworkerCount: workerCount,\n\t\tjobs: make(chan func()),\n\t}\n}", "func NewMockStore(blocksWritten map[ipld.Link][]byte) (ipldbridge.Loader, ipldbridge.Storer) {\n\tvar storeLk sync.RWMutex\n\tstorer := func(lnkCtx ipldbridge.LinkContext) (io.Writer, ipldbridge.StoreCommitter, error) {\n\t\tvar buffer bytes.Buffer\n\t\tcommitter := func(lnk ipld.Link) error {\n\t\t\tstoreLk.Lock()\n\t\t\tblocksWritten[lnk] = buffer.Bytes()\n\t\t\tstoreLk.Unlock()\n\t\t\treturn nil\n\t\t}\n\t\treturn &buffer, committer, nil\n\t}\n\tloader := func(lnk ipld.Link, lnkCtx ipldbridge.LinkContext) (io.Reader, error) {\n\t\tstoreLk.RLock()\n\t\tdata, ok := blocksWritten[lnk]\n\t\tstoreLk.RUnlock()\n\t\tif ok {\n\t\t\treturn bytes.NewReader(data), nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"unable to load block\")\n\t}\n\n\treturn loader, storer\n}", "func New(fn string, hashWidth, ptrBytes int) (s *Store, err error) {\n\ts = &Store{\n\t\tHashWidth: min(max(hashWidth, 8), 29),\n\t\tPtrBytes: min(max(ptrBytes, 4), 7),\n\t}\n\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\ts = nil\n\t\t\terr = e.(error)\n\t\t}\n\t}()\n\n\tif s.accessor, err = storage.NewFile(fn, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666); err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", me(), err)\n\t}\n\n\tb := []byte{\n\t\t'Z', 'O', 'N', 'E', 'D', 'B',\n\t\tbyte(s.HashWidth),\n\t\tbyte(s.PtrBytes),\n\t\t0, 0, 0, 0, 0, 0, 0, 0,\n\t}\n\tif n, err := s.accessor.WriteAt(b, 0); n != len(b) || err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", me(), err)\n\t}\n\n\tif s.Store, err = hdb.New(&hdbAccessor{s.delta(), s.accessor}); err != nil {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", me(), err)\n\t}\n\n\treturn\n}", "func NewBlock(hash string) *pfs.Block {\n\treturn &pfs.Block{\n\t\tHash: hash,\n\t}\n}", "func (in *StoreSpec) DeepCopy() *StoreSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(StoreSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func newBlock(prevHash [32]byte) *protocol.Block {\n\tb := new(protocol.Block)\n\tb.PrevHash = prevHash\n\tb.StateCopy = make(map[[32]byte]*protocol.Account)\n\treturn b\n}", "func NewBlockStorage(blockstore *app.BlockStore, dir string) *BlockStorage {\n\treturn &BlockStorage{\n\t\tblockstore: blockstore,\n\t\tdir: dir,\n\t}\n}", "func NewStore(log logger.Entry, cfg Config) (*Store, error) {\n\n\t// Open bolt database\n\t// TODO: move 0644 to Config\n\tdb, err := bolt.Open(cfg.File, 0644, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.WithField(\"config\", cfg).Debug(\"Create store\")\n\ts := Store{\n\t\tBucket: []byte(cfg.Bucket),\n\t\tNumberKey: []byte(cfg.NumberKey),\n\t\tSettingsKey: []byte(cfg.SettingsKey),\n\t\tdb: db,\n\t\tlog: log,\n\t}\n\treturn &s, nil\n\n}", "func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr {\n\tlogger.Debugf(\"newBlockfileMgr() initializing file-based block storage for ledger: %s \", id)\n\tvar rwMutexs []*sync.RWMutex\n\n\t//Determine the root directory for the blockfile storage, if it does not exist create it\n\trootDir := conf.getLedgerBlockDir(id)\n\t_, err := util.CreateDirIfMissing(rootDir)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error: %s\", err))\n\t}\n\t// Instantiate the manager, i.e. 
blockFileMgr structure\n\tmgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore, rwMutexs: rwMutexs}\n\n\t// cp = checkpointInfo, retrieve from the database the file suffix or number of where blocks were stored.\n\t// It also retrieves the current size of that file and the last block number that was written to that file.\n\t// At init checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]\n\tcpInfo, err := mgr.loadCurrentInfo()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not get block file info for current block file from db: %s\", err))\n\t}\n\tif cpInfo == nil {\n\t\tlogger.Info(`Getting block information from block storage`)\n\t\tif cpInfo, err = constructCheckpointInfoFromBlockFiles(rootDir); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Could not build checkpoint info from block files: %s\", err))\n\t\t}\n\t\tlogger.Debugf(\"Info constructed by scanning the blocks dir = %s\", spew.Sdump(cpInfo))\n\t} else {\n\t\tlogger.Debug(`Synching block information from block storage (if needed)`)\n\t\tsyncCPInfoFromFS(rootDir, cpInfo)\n\t}\n\terr = mgr.saveCurrentInfo(cpInfo, true)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not save next block file info to db: %s\", err))\n\t}\n\n\tmgr.oldestFileChunkSuffixNum = syncOldestFileNum(rootDir)\n\t//If start up is a restart of an existing storage,new the rwMutex for the files\n\tif conf.dumpConf.Enabled {\n\t\tfor i := 0; i <= cpInfo.latestFileChunkSuffixNum; i++ {\n\t\t\trwMutex := new(sync.RWMutex)\n\t\t\tmgr.rwMutexs = append(mgr.rwMutexs, rwMutex)\n\t\t}\n\t}\n\tmgr.dumpMutex = new(sync.Mutex)\n\n\t//Open a writer to the file identified by the number and truncate it to only contain the latest block\n\t// that was completely saved (file system, index, cpinfo, etc)\n\tcurrentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not open writer to current file: %s\", err))\n\t}\n\t//Truncate the file to remove excess past last block\n\terr = currentFileWriter.truncateFile(cpInfo.latestFileChunksize)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Could not truncate current file to known size in db: %s\", err))\n\t}\n\n\t// Create a new KeyValue store database handler for the blocks index in the keyvalue database\n\tmgr.index = newBlockIndex(indexConfig, indexStore)\n\n\t// Update the manager with the checkpoint info and the file writer\n\tmgr.cpInfo = cpInfo\n\tmgr.currentFileWriter = currentFileWriter\n\t// Create a checkpoint condition (event) variable, for the goroutine waiting for\n\t// or announcing the occurrence of an event.\n\tmgr.cpInfoCond = sync.NewCond(&sync.Mutex{})\n\n\t// init BlockchainInfo for external API's\n\tbcInfo := &common.BlockchainInfo{\n\t\tHeight: 0,\n\t\tCurrentBlockHash: nil,\n\t\tPreviousBlockHash: nil}\n\n\tif !cpInfo.isChainEmpty {\n\t\t//If start up is a restart of an existing storage, sync the index from block storage and update BlockchainInfo for external API's\n\t\tmgr.syncIndex()\n\t\tlastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Could not retrieve header of the last block form file: %s\", err))\n\t\t}\n\t\tlastBlockHash := lastBlockHeader.Hash()\n\t\tpreviousBlockHash := lastBlockHeader.PreviousHash\n\t\tbcInfo = &common.BlockchainInfo{\n\t\t\tHeight: cpInfo.lastBlockNumber + 1,\n\t\t\tCurrentBlockHash: lastBlockHash,\n\t\t\tPreviousBlockHash: 
previousBlockHash}\n\t}\n\tmgr.bcInfo.Store(bcInfo)\n\treturn mgr\n}", "func getBlockStoreProvider(fsPath string) (*blkstorage.BlockStoreProvider, error) {\n\t// Format path to block store\n\tblockStorePath := kvledger.BlockStorePath(filepath.Join(fsPath, ledgersDataDirName))\n\tisEmpty, err := fileutil.DirEmpty(blockStorePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif isEmpty {\n\t\treturn nil, errors.Errorf(\"provided path %s is empty. Aborting identifytxs\", fsPath)\n\t}\n\t// Default fields for block store provider\n\tconf := blkstorage.NewConf(blockStorePath, 0)\n\tindexConfig := &blkstorage.IndexConfig{\n\t\tAttrsToIndex: []blkstorage.IndexableAttr{\n\t\t\tblkstorage.IndexableAttrBlockNum,\n\t\t\tblkstorage.IndexableAttrBlockHash,\n\t\t\tblkstorage.IndexableAttrTxID,\n\t\t\tblkstorage.IndexableAttrBlockNumTranNum,\n\t\t},\n\t}\n\tmetricsProvider := &disabled.Provider{}\n\t// Create new block store provider\n\tblockStoreProvider, err := blkstorage.NewProvider(conf, indexConfig, metricsProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn blockStoreProvider, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewBlockHeaderFeederSpec creates a new BlockHeaderFeederSpec for the given parameters.
func NewBlockHeaderFeederSpec(spec *job.BlockHeaderFeederSpec) *BlockHeaderFeederSpec {
	return &BlockHeaderFeederSpec{
		CoordinatorV1Address:       spec.CoordinatorV1Address,
		CoordinatorV2Address:       spec.CoordinatorV2Address,
		CoordinatorV2PlusAddress:   spec.CoordinatorV2PlusAddress,
		WaitBlocks:                 spec.WaitBlocks,
		LookbackBlocks:             spec.LookbackBlocks,
		BlockhashStoreAddress:      spec.BlockhashStoreAddress,
		BatchBlockhashStoreAddress: spec.BatchBlockhashStoreAddress,
		PollPeriod:                 spec.PollPeriod,
		RunTimeout:                 spec.RunTimeout,
		EVMChainID:                 spec.EVMChainID,
		FromAddresses:              spec.FromAddresses,
		GetBlockhashesBatchSize:    spec.GetBlockhashesBatchSize,
		StoreBlockhashesBatchSize:  spec.StoreBlockhashesBatchSize,
	}
}
[ "func (_Rootchain *RootchainFilterer) FilterNewHeaderBlock(opts *bind.FilterOpts, proposer []common.Address, headerBlockId []*big.Int, reward []*big.Int) (*RootchainNewHeaderBlockIterator, error) {\n\n\tvar proposerRule []interface{}\n\tfor _, proposerItem := range proposer {\n\t\tproposerRule = append(proposerRule, proposerItem)\n\t}\n\tvar headerBlockIdRule []interface{}\n\tfor _, headerBlockIdItem := range headerBlockId {\n\t\theaderBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)\n\t}\n\tvar rewardRule []interface{}\n\tfor _, rewardItem := range reward {\n\t\trewardRule = append(rewardRule, rewardItem)\n\t}\n\n\tlogs, sub, err := _Rootchain.contract.FilterLogs(opts, \"NewHeaderBlock\", proposerRule, headerBlockIdRule, rewardRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &RootchainNewHeaderBlockIterator{contract: _Rootchain.contract, event: \"NewHeaderBlock\", logs: logs, sub: sub}, nil\n}", "func BlockSpec(block *Block) *Spec {\n\treturn &Spec{\n\t\tBlock: &Spec_BlockValue{\n\t\t\tBlockValue: block,\n\t\t},\n\t}\n}", "func TestBlockHeader(t *testing.T) {\n\tnonce, err := random.Uint64()\n\tif err != nil {\n\t\tt.Errorf(\"random.Uint64: Error generating nonce: %v\", err)\n\t}\n\n\thashes := []*daghash.Hash{mainnetGenesisHash, simnetGenesisHash}\n\n\tmerkleHash := mainnetGenesisMerkleRoot\n\tacceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot\n\tbits := uint32(0x1d00ffff)\n\tbh := NewBlockHeader(1, hashes, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce)\n\n\t// Ensure we get the same data back out.\n\tif !reflect.DeepEqual(bh.ParentHashes, hashes) {\n\t\tt.Errorf(\"NewBlockHeader: wrong prev hashes - got %v, want %v\",\n\t\t\tspew.Sprint(bh.ParentHashes), spew.Sprint(hashes))\n\t}\n\tif !bh.HashMerkleRoot.IsEqual(merkleHash) {\n\t\tt.Errorf(\"NewBlockHeader: wrong merkle root - got %v, want %v\",\n\t\t\tspew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash))\n\t}\n\tif bh.Bits != bits {\n\t\tt.Errorf(\"NewBlockHeader: wrong bits - got %v, want %v\",\n\t\t\tbh.Bits, bits)\n\t}\n\tif bh.Nonce != nonce {\n\t\tt.Errorf(\"NewBlockHeader: wrong nonce - got %v, want %v\",\n\t\t\tbh.Nonce, nonce)\n\t}\n}", "func BlockAttrsSpec(blockAttrs *BlockAttrs) *Spec {\n\treturn &Spec{\n\t\tBlock: &Spec_BlockAttrs{\n\t\t\tBlockAttrs: blockAttrs,\n\t\t},\n\t}\n}", "func NewBlockHeader(prev BlockHeader, uxHash cipher.SHA256, currentTime, fee uint64, body BlockBody) BlockHeader {\n\tif currentTime <= prev.Time {\n\t\tlog.Panic(\"Time can only move forward\")\n\t}\n\tbodyHash := body.Hash()\n\tprevHash := prev.Hash()\n\treturn BlockHeader{\n\t\tBodyHash: bodyHash,\n\t\tVersion: prev.Version,\n\t\tPrevHash: prevHash,\n\t\tTime: currentTime,\n\t\tBkSeq: prev.BkSeq + 1,\n\t\tFee: fee,\n\t\tUxHash: uxHash,\n\t}\n}", "func TestFakeBlockHeaderFetcher(t *testing.T) {\n\tlogger := log.GetLogger().WithOutput(log.NewFormattingOutput(os.Stdout, log.NewHumanReadableFormatter()))\n\tffc := NewFakeBlockAndTimestampGetter(logger)\n\n\trequire.EqualValues(t, FAKE_CLIENT_LAST_TIMESTAMP_EXPECTED, ffc.data[FAKE_CLIENT_NUMBER_OF_BLOCKS-1], \"expected ffc last block to be of specific ts\")\n}", "func NewHeaderBlock(bigBlockSize POIFSBigBlockSize) *HeaderBlock {\n\th := new(HeaderBlock)\n\th.bigBlockSize = bigBlockSize\n\n\t// Set all default values. 
Out data is always 512 no matter what\n\th.data = make([]byte, SMALLER_BIG_BLOCK_SIZE)\n\tfor i := 0; i < len(h.data); i++ {\n\t\th.data[i] = byte(0xFF)\n\t}\n\t//Set all the default values\n\tbinary.LittleEndian.PutUint64(h.data, SIGNATURE)\n\tNewIntegerField(0x08, 0, h.data)\n\tNewIntegerField(0x0c, 0, h.data)\n\tNewIntegerField(0x10, 0, h.data)\n\tNewIntegerField(0x14, 0, h.data)\n\tNewShortField(0x18, int16(0x3b), h.data)\n\tNewShortField(0x1a, int16(0x3), h.data)\n\tNewShortField(0x1c, int16(-2), h.data)\n\n\tNewShortField(0x1e, bigBlockSize.HeaderValue, h.data)\n\tNewIntegerField(0x20, 0x6, h.data)\n\tNewIntegerField(0x24, 0, h.data)\n\tNewIntegerField(0x28, 0, h.data)\n\tNewIntegerField(0x34, 0, h.data)\n\tNewIntegerField(0x38, 0x1000, h.data)\n\n\t// Initialize the variables\n\th.property_start = END_OF_CHAIN\n\th.sbat_start = END_OF_CHAIN\n\th.xbat_start = END_OF_CHAIN\n\n\treturn h\n}", "func BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock, k Keeper) (tags sdk.Tags) {\n\tctx = ctx.WithLogger(ctx.Logger().With(\"handler\", \"beginBlock\").With(\"module\", \"iris/rand\"))\n\n\tcurrentTimestamp := ctx.BlockHeader().Time.Unix()\n\tlastBlockHeight := ctx.BlockHeight() - 1\n\tlastBlockHash := []byte(ctx.BlockHeader().LastBlockId.Hash)\n\n\t// get pending random number requests for lastBlockHeight\n\titerator := k.IterateRandRequestQueueByHeight(ctx, lastBlockHeight)\n\tdefer iterator.Close()\n\n\thandledRandReqNum := 0\n\tfor ; iterator.Valid(); iterator.Next() {\n\t\tvar request Request\n\t\tk.GetCdc().MustUnmarshalBinaryLengthPrefixed(iterator.Value(), &request)\n\n\t\t// get the request id\n\t\treqID := GenerateRequestID(request)\n\n\t\t// generate a random number\n\t\trand := MakePRNG(lastBlockHash, currentTimestamp, request.Consumer).GetRand()\n\t\tk.SetRand(ctx, reqID, NewRand(request.TxHash, lastBlockHeight, rand))\n\n\t\t// remove the request\n\t\tk.DequeueRandRequest(ctx, lastBlockHeight, reqID)\n\n\t\t// add tags\n\t\ttags = tags.AppendTags(sdk.NewTags(\n\t\t\tTagReqID, []byte(hex.EncodeToString(reqID)),\n\t\t\tTagRand, []byte(rand.Rat.FloatString(RandPrec)),\n\t\t))\n\n\t\thandledRandReqNum++\n\t}\n\n\tctx.Logger().Info(fmt.Sprintf(\"%d rand requests are handled\", handledRandReqNum))\n\treturn\n}", "func BlockSetSpec(blockSet *BlockSet) *Spec {\n\treturn &Spec{\n\t\tBlock: &Spec_BlockSet{\n\t\t\tBlockSet: blockSet,\n\t\t},\n\t}\n}", "func generateNewBlock(oldBlock Block, dataPayload string) (Block, error) {\n\n\tvar newBlock Block\n\ttimeNow := time.Now()\n\n\tnewBlock.Index = oldBlock.Index + 1\n\tnewBlock.Timestamp = timeNow.String()\n\n\tnewEvent, err := dataPayloadtoServiceEvent(dataPayload)\n\n\tif err != nil {\n\t\tlog.Println(\"ERROR: Unable to convert data payload into ServiceEvent for new block generation.\")\n\t}\n\n\tnewBlock.Event = newEvent\n\tnewBlock.PrevHash = oldBlock.Hash\n\tnewBlock.Hash = calculateHash(newBlock)\n\n\treturn newBlock, nil\n}", "func (_Rootchain *RootchainFilterer) WatchNewHeaderBlock(opts *bind.WatchOpts, sink chan<- *RootchainNewHeaderBlock, proposer []common.Address, headerBlockId []*big.Int, reward []*big.Int) (event.Subscription, error) {\n\n\tvar proposerRule []interface{}\n\tfor _, proposerItem := range proposer {\n\t\tproposerRule = append(proposerRule, proposerItem)\n\t}\n\tvar headerBlockIdRule []interface{}\n\tfor _, headerBlockIdItem := range headerBlockId {\n\t\theaderBlockIdRule = append(headerBlockIdRule, headerBlockIdItem)\n\t}\n\tvar rewardRule []interface{}\n\tfor _, rewardItem := range reward {\n\t\trewardRule = 
append(rewardRule, rewardItem)\n\t}\n\n\tlogs, sub, err := _Rootchain.contract.WatchLogs(opts, \"NewHeaderBlock\", proposerRule, headerBlockIdRule, rewardRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(RootchainNewHeaderBlock)\n\t\t\t\tif err := _Rootchain.contract.UnpackLog(event, \"NewHeaderBlock\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func NewBlockAttrs(name, elementType string, required bool) *Spec {\n\treturn BlockAttrsSpec(&BlockAttrs{\n\t\tName: name,\n\t\tRequired: required,\n\t\tType: elementType,\n\t})\n}", "func BlockListSpec(blockList *BlockList) *Spec {\n\treturn &Spec{\n\t\tBlock: &Spec_BlockList{\n\t\t\tBlockList: blockList,\n\t\t},\n\t}\n}", "func NewLegacyBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,\n\tbits uint32, nonce uint32) *BlockHeader {\n\tnounce256 := Uint256FromUint32(nonce)\n\treturn NewBlockHeader(version, prevHash, merkleRootHash, 0, bits, &nounce256, []byte{})\n}", "func NewBlockHeader(version int32, shardIndex shard.Index, height int64,\n\tprevHash *chainhash.Hash, blockBodyHash *chainhash.Hash,\n\toutsMerkleRoot *merkle.MerkleHash, shardLedgerMerkleRoot *merkle.MerkleHash, depositTxsOuts []OutPoint, withdrawTxsOuts []OutPoint) *BlockHeader {\n\n\t// Limit the timestamp to one second precision since the protocol\n\t// doesn't support better.\n\treturn &BlockHeader{\n\t\tVersion: version,\n\t\tShardIndex: shardIndex,\n\t\tHeight: height,\n\t\tPrevBlockHeader: *prevHash,\n\t\tBlockBodyHash: *blockBodyHash,\n\t\tOutsMerkleRoot: *outsMerkleRoot,\n\t\tShardLedgerMerkleRoot: *shardLedgerMerkleRoot,\n\t\tDepositTxsOuts: depositTxsOuts,\n\t\tWithdrawTxsOuts: withdrawTxsOuts,\n\t}\n}", "func (l *Ledger) FormatFakeBlock(txList []*pb.Transaction,\n\tproposer []byte, ecdsaPk *ecdsa.PrivateKey, /*矿工的公钥私钥*/\n\ttimestamp int64, curTerm int64, curBlockNum int64,\n\tpreHash []byte, utxoTotal *big.Int, blockHeight int64) (*pb.InternalBlock, error) {\n\treturn l.formatBlock(txList, proposer, ecdsaPk, timestamp, curTerm, curBlockNum, preHash, 0, utxoTotal, false, nil, nil, blockHeight)\n}", "func NewPbftBlockHeader(n, gasLimit, gasUsed int64, parentHash, txnsHash []byte) *pb.PbftBlockHeader {\n\theader := &pb.PbftBlockHeader{\n\t\tNumber: n,\n\t\tGasLimit: gasLimit,\n\t\tGasUsed: gasUsed,\n\t\tTimestamp: time.Now().Unix(),\n\t\tParentHash: parentHash,\n\t\tTxnsHash: txnsHash,\n\t}\n\n\treturn header\n}", "func (*GetBlockByHeightRequest) Descriptor() ([]byte, []int) {\n\treturn file_service_proto_rawDescGZIP(), []int{13}\n}", "func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,\n\theight uint32, bits uint32, nonce *[32]byte, solution []byte) *BlockHeader {\n\n\t// Limit the timestamp to one second precision since the protocol\n\t// doesn't support better.\n\tsolutionCopy := make([]byte, len(solution))\n\tcopy(solutionCopy, solution)\n\treturn &BlockHeader{\n\t\tVersion: version,\n\t\tPrevBlock: *prevHash,\n\t\tMerkleRoot: *merkleRootHash,\n\t\tTimestamp: 
time.Unix(time.Now().Unix(), 0),\n\t\tHeight: height,\n\t\tReserved: [7]uint32{},\n\t\tBits: bits,\n\t\tNonce: *nonce,\n\t\tSolution: solutionCopy,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewBootstrapSpec initializes a new BootstrapSpec from a job.BootstrapSpec
func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec {
	return &BootstrapSpec{
		ContractID:                        spec.ContractID,
		Relay:                             spec.Relay,
		RelayConfig:                       spec.RelayConfig,
		BlockchainTimeout:                 spec.BlockchainTimeout,
		ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,
		ContractConfigConfirmations:       spec.ContractConfigConfirmations,
		CreatedAt:                         spec.CreatedAt,
		UpdatedAt:                         spec.UpdatedAt,
	}
}
[ "func newBootstrapTemplate(e2eCtx *E2EContext) *cfn_bootstrap.Template {\n\tBy(\"Creating a bootstrap AWSIAMConfiguration\")\n\tt := cfn_bootstrap.NewTemplate()\n\tt.Spec.BootstrapUser.Enable = true\n\tt.Spec.SecureSecretsBackends = []v1alpha3.SecretBackend{\n\t\tv1alpha3.SecretBackendSecretsManager,\n\t\tv1alpha3.SecretBackendSSMParameterStore,\n\t}\n\tregion, err := credentials.ResolveRegion(\"\")\n\tExpect(err).NotTo(HaveOccurred())\n\tt.Spec.Region = region\n\tstr, err := yaml.Marshal(t.Spec)\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(ioutil.WriteFile(path.Join(e2eCtx.Settings.ArtifactFolder, \"awsiamconfiguration.yaml\"), str, 0644)).To(Succeed())\n\tcfnData, err := t.RenderCloudFormation().YAML()\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(ioutil.WriteFile(path.Join(e2eCtx.Settings.ArtifactFolder, \"cloudformation.yaml\"), cfnData, 0644)).To(Succeed())\n\treturn &t\n}", "func NewBootstrap(router *mux.Router) *Bootstrap {\n\treturn &Bootstrap{\n\t\trouter: router,\n\t}\n}", "func NewBootstrap(router *echo.Echo, serviceName string) *Bootstrap {\n\treturn &Bootstrap{\n\t\trouter: router,\n\t\tserviceName: serviceName,\n\t}\n}", "func New(conf Config) *Bootstrap {\n\treturn &Bootstrap{\n\t\tConfig: conf,\n\t\tcancelCh: make(chan struct{}),\n\t}\n}", "func TestNewBootstrapController(t *testing.T) {\n\t// Tests a subset of inputs to ensure they are set properly in the controller\n\tmaster, _, assert := setUp(t)\n\tportRange := util.PortRange{Base: 10, Size: 10}\n\n\tmaster.namespaceRegistry = namespace.NewRegistry(nil)\n\tmaster.serviceRegistry = registrytest.NewServiceRegistry()\n\tmaster.endpointRegistry = endpoint.NewRegistry(nil)\n\n\tmaster.serviceNodePortRange = portRange\n\tmaster.masterCount = 1\n\tmaster.serviceReadWritePort = 1000\n\tmaster.publicReadWritePort = 1010\n\n\tcontroller := master.NewBootstrapController()\n\n\tassert.Equal(controller.NamespaceRegistry, master.namespaceRegistry)\n\tassert.Equal(controller.EndpointRegistry, master.endpointRegistry)\n\tassert.Equal(controller.ServiceRegistry, master.serviceRegistry)\n\tassert.Equal(controller.ServiceNodePortRange, portRange)\n\tassert.Equal(controller.MasterCount, master.masterCount)\n\tassert.Equal(controller.ServicePort, master.serviceReadWritePort)\n\tassert.Equal(controller.PublicServicePort, master.publicReadWritePort)\n}", "func NewSpec(serviceAccountName string, containers []corev1.Container, volumes []corev1.Volume) *Builder {\n\treturn &Builder{spec: newPodSpec(serviceAccountName, containers, volumes)}\n}", "func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}", "func jobFromGitHubSpec(namePrefix string,\n\tspec *protocol.JobSpec,\n\tactiveDeadlineSec int64,\n\tnodeSelector map[string]string,\n\tcpuRequest resource.Quantity,\n\tentryPointName string,\n\tpvcName string) *v13.Job {\n\n\tbackOffLimit := int32(1)\n\taccessMode := int32(ConfigMapAccessMode)\n\n\ttheJob := &v13.Job{\n\t\tObjectMeta: v12.ObjectMeta{\n\t\t\tGenerateName: namePrefix,\n\t\t},\n\n\t\tSpec: v13.JobSpec{\n\t\t\tBackoffLimit: &backOffLimit,\n\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tRestartPolicy: v1.RestartPolicyOnFailure,\n\t\t\t\t\tActiveDeadlineSeconds: &activeDeadlineSec,\n\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: ContainerNameBuilder,\n\t\t\t\t\t\t\tCommand: []string{\"/jobscripts/entrypoint.sh\", \"||\", \"sleep 5\"},\n\n\t\t\t\t\t\t\t// Image\n\t\t\t\t\t\t\tImage: 
spec.Image.Name,\n\t\t\t\t\t\t\tImagePullPolicy: v1.PullIfNotPresent,\n\n\t\t\t\t\t\t\tEnv: convertEnvVars(spec.Variables),\n\n\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"jobscripts\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/jobscripts\",\n\t\t\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"buildpvc\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/build\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\t\t\t\tv1.ResourceCPU: cpuRequest,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"jobscripts\",\n\t\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\tConfigMap: &v1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\t\tLocalObjectReference: v1.LocalObjectReference{\n\t\t\t\t\t\t\t\t\t\tName: entryPointName,\n\t\t\t\t\t\t\t\t\t},\n\n\t\t\t\t\t\t\t\t\tDefaultMode: &accessMode,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"buildpvc\",\n\t\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\t\t\tClaimName: pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\n\t\t\t\t\tNodeSelector: nodeSelector,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn theJob\n}", "func GenJobSpec(jobID, image string, cf *ConfigSetting, entryPoint []string, command []string, labels map[string]string, webhookSetting *WebhookSetting) *batchv1.Job {\n\t// TODO: retry should be changeable ?\n\tbackoffLimit := int32(0)\n\tttlSecondsAfterFinished := int32(3600)\n\tpodSpec := apiv1.PodSpec{\n\t\tContainers: []apiv1.Container{\n\t\t\t{\n\t\t\t\tName: \"smile-job\",\n\t\t\t\tImage: image,\n\t\t\t\tCommand: entryPoint,\n\t\t\t\tArgs: command,\n\t\t\t\t// Resources: apiv1.ResourceRequirements{\n\t\t\t\t// \tLimits: apiv1.ResourceList{\n\t\t\t\t// \t\t\"cpu\": resource.MustParse(cpuLimit),\n\t\t\t\t// \t\t\"memory\": resource.MustParse(memLimit),\n\t\t\t\t// \t},\n\t\t\t\t// \tRequests: apiv1.ResourceList{\n\t\t\t\t// \t\t\"cpu\": resource.MustParse(cpuReq),\n\t\t\t\t// \t\t\"memory\": resource.MustParse(memReq),\n\t\t\t\t// \t},\n\t\t\t\t// },\n\t\t\t\tEnv: []apiv1.EnvVar{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"MY_POD_NAMESPACE\",\n\t\t\t\t\t\tValueFrom: &apiv1.EnvVarSource{\n\t\t\t\t\t\t\tFieldRef: &apiv1.ObjectFieldSelector{\n\t\t\t\t\t\t\t\tFieldPath: \"metadata.namespace\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tRestartPolicy: apiv1.RestartPolicyNever,\n\t}\n\n\t//case for need injection configmap\n\tif cf != nil {\n\t\t//declare a volume for configmap\n\t\tpodSpec.Volumes = []apiv1.Volume{\n\t\t\t{\n\t\t\t\tName: \"cofig-vol\",\n\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\tConfigMap: &apiv1.ConfigMapVolumeSource{\n\t\t\t\t\t\tLocalObjectReference: apiv1.LocalObjectReference{\n\t\t\t\t\t\t\tName: cf.ConfigMapRef,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t//volumn mount\n\t\tpodSpec.Containers[0].VolumeMounts = []apiv1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"cofig-vol\",\n\t\t\t\tMountPath: cf.ConfigDir,\n\t\t\t},\n\t\t}\n\t\t//pass a env var for app know configmap dir path\n\t\tpodSpec.Containers[0].Env = append(podSpec.Containers[0].Env, apiv1.EnvVar{\n\t\t\tName: \"CONFIGDIR\",\n\t\t\tValue: cf.ConfigDir,\n\t\t})\n\t}\n\n\tobjectMeta := 
metav1.ObjectMeta{\n\t\tName: jobID,\n\t}\n\n\tif webhookSetting != nil {\n\t\t//injection into annotations\n\t\tobjectMeta.Annotations = decodeWebhookConfig(webhookSetting)\n\t\tlabels[\"webhook-enable\"] = \"true\"\n\t} else {\n\t\tlabels[\"webhook-enable\"] = \"false\"\n\t}\n\n\treturn &batchv1.Job{\n\t\tObjectMeta: objectMeta,\n\t\tSpec: batchv1.JobSpec{\n\t\t\tBackoffLimit: &backoffLimit,\n\t\t\t//FEATURE STATE: Kubernetes v1.12 [alpha]\n\t\t\tTTLSecondsAfterFinished: &ttlSecondsAfterFinished,\n\t\t\tTemplate: apiv1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: podSpec,\n\t\t\t},\n\t\t},\n\t}\n}", "func NewSpec(api *gmail.Service, db *db.DB) (*Spec, error) {\n\tlog.SetLevel(log.DebugLevel)\n\tlog.Info(\"starting new spec\")\n\n\tbytes, err := ioutil.ReadFile(\"./spec.yaml\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read file: %v\", err)\n\t}\n\n\tspec := &Spec{\n\t\tapi: api,\n\t\tdb: db,\n\t}\n\n\terr = yaml.Unmarshal(bytes, spec)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal: %v\", err)\n\t}\n\n\treturn spec, nil\n}", "func NewCmdBootstrap(name, fullName string) *cobra.Command {\n\to := NewBootstrapOptions()\n\n\tbootstrapCmd := &cobra.Command{\n\t\tUse: name,\n\t\tShort: bootstrapShortDesc,\n\t\tLong: bootstrapLongDesc,\n\t\tExample: fmt.Sprintf(bootstrapExample, fullName),\n\t\tArgs: cobra.ExactArgs(2),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tgenericclioptions.GenericRun(o, cmd, args)\n\t\t},\n\t}\n\n\tbootstrapCmd.Flags().StringVarP(&o.prefix, \"prefix\", \"p\", \"\", \"add a prefix to the environment names\")\n\treturn bootstrapCmd\n}", "func (bsc BootstrapConfiguration) New(\n\trsOpts result.Options,\n\topts storage.Options,\n\ttopoMapProvider topology.MapProvider,\n\torigin topology.Host,\n\tadminClient client.AdminClient,\n) (bootstrap.ProcessProvider, error) {\n\tidxOpts := opts.IndexOptions()\n\tcompactor, err := compaction.NewCompactor(idxOpts.MetadataArrayPool(),\n\t\tindex.MetadataArrayPoolCapacity,\n\t\tidxOpts.SegmentBuilderOptions(),\n\t\tidxOpts.FSTSegmentOptions(),\n\t\tcompaction.CompactorOptions{\n\t\t\tFSTWriterOptions: &fst.WriterOptions{\n\t\t\t\t// DisableRegistry is set to true to trade a larger FST size\n\t\t\t\t// for a faster FST compaction since we want to reduce the end\n\t\t\t\t// to end latency for time to first index a metric.\n\t\t\t\tDisableRegistry: true,\n\t\t\t},\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tbs bootstrap.BootstrapperProvider\n\t\tfsOpts = opts.CommitLogOptions().FilesystemOptions()\n\t\torderedBootstrappers = bsc.orderedBootstrappers()\n\t)\n\t// Start from the end of the list because the bootstrappers are ordered by precedence in descending order.\n\t// I.e. 
each bootstrapper wraps the preceding bootstrapper, and so the outer-most bootstrapper is run first.\n\tfor i := len(orderedBootstrappers) - 1; i >= 0; i-- {\n\t\tswitch orderedBootstrappers[i] {\n\t\tcase bootstrapper.NoOpAllBootstrapperName:\n\t\t\tbs = bootstrapper.NewNoOpAllBootstrapperProvider()\n\t\tcase bootstrapper.NoOpNoneBootstrapperName:\n\t\t\tbs = bootstrapper.NewNoOpNoneBootstrapperProvider()\n\t\tcase bfs.FileSystemBootstrapperName:\n\t\t\tfsCfg := bsc.filesystemConfig()\n\t\t\tfsbOpts := bfs.NewOptions().\n\t\t\t\tSetInstrumentOptions(opts.InstrumentOptions()).\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetFilesystemOptions(fsOpts).\n\t\t\t\tSetIndexOptions(opts.IndexOptions()).\n\t\t\t\tSetPersistManager(opts.PersistManager()).\n\t\t\t\tSetIndexClaimsManager(opts.IndexClaimsManager()).\n\t\t\t\tSetCompactor(compactor).\n\t\t\t\tSetRuntimeOptionsManager(opts.RuntimeOptionsManager()).\n\t\t\t\tSetIdentifierPool(opts.IdentifierPool()).\n\t\t\t\tSetMigrationOptions(fsCfg.migration().NewOptions()).\n\t\t\t\tSetStorageOptions(opts).\n\t\t\t\tSetIndexSegmentsVerify(bsc.VerifyOrDefault().VerifyIndexSegmentsOrDefault())\n\t\t\tif v := bsc.IndexSegmentConcurrency; v != nil {\n\t\t\t\tfsbOpts = fsbOpts.SetIndexSegmentConcurrency(*v)\n\t\t\t}\n\t\t\tif err := fsbOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs, err = bfs.NewFileSystemBootstrapperProvider(fsbOpts, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase commitlog.CommitLogBootstrapperName:\n\t\t\tcCfg := bsc.commitlogConfig()\n\t\t\tcOpts := commitlog.NewOptions().\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetCommitLogOptions(opts.CommitLogOptions()).\n\t\t\t\tSetRuntimeOptionsManager(opts.RuntimeOptionsManager()).\n\t\t\t\tSetReturnUnfulfilledForCorruptCommitLogFiles(cCfg.ReturnUnfulfilledForCorruptCommitLogFiles)\n\t\t\tif err := cOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinspection, err := fs.InspectFilesystem(fsOpts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs, err = commitlog.NewCommitLogBootstrapperProvider(cOpts, inspection, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase peers.PeersBootstrapperName:\n\t\t\tpCfg := bsc.peersConfig()\n\t\t\tpOpts := peers.NewOptions().\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetFilesystemOptions(fsOpts).\n\t\t\t\tSetIndexOptions(opts.IndexOptions()).\n\t\t\t\tSetAdminClient(adminClient).\n\t\t\t\tSetPersistManager(opts.PersistManager()).\n\t\t\t\tSetIndexClaimsManager(opts.IndexClaimsManager()).\n\t\t\t\tSetCompactor(compactor).\n\t\t\t\tSetRuntimeOptionsManager(opts.RuntimeOptionsManager()).\n\t\t\t\tSetContextPool(opts.ContextPool())\n\t\t\tif pCfg.StreamShardConcurrency != nil {\n\t\t\t\tpOpts = pOpts.SetDefaultShardConcurrency(*pCfg.StreamShardConcurrency)\n\t\t\t}\n\t\t\tif pCfg.StreamPersistShardConcurrency != nil {\n\t\t\t\tpOpts = pOpts.SetShardPersistenceConcurrency(*pCfg.StreamPersistShardConcurrency)\n\t\t\t}\n\t\t\tif pCfg.StreamPersistShardFlushConcurrency != nil {\n\t\t\t\tpOpts = pOpts.SetShardPersistenceFlushConcurrency(*pCfg.StreamPersistShardFlushConcurrency)\n\t\t\t}\n\t\t\tif v := bsc.IndexSegmentConcurrency; v != nil {\n\t\t\t\tpOpts = pOpts.SetIndexSegmentConcurrency(*v)\n\t\t\t}\n\t\t\tif err := pOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs, err = peers.NewPeersBootstrapperProvider(pOpts, bs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase 
uninitialized.UninitializedTopologyBootstrapperName:\n\t\t\tuOpts := uninitialized.NewOptions().\n\t\t\t\tSetResultOptions(rsOpts).\n\t\t\t\tSetInstrumentOptions(opts.InstrumentOptions())\n\t\t\tif err := uOpts.Validate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbs = uninitialized.NewUninitializedTopologyBootstrapperProvider(uOpts, bs)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown bootstrapper: %s\", orderedBootstrappers[i])\n\t\t}\n\t}\n\n\tproviderOpts := bootstrap.NewProcessOptions().\n\t\tSetTopologyMapProvider(topoMapProvider).\n\t\tSetOrigin(origin)\n\tif bsc.CacheSeriesMetadata != nil {\n\t\tproviderOpts = providerOpts.SetCacheSeriesMetadata(*bsc.CacheSeriesMetadata)\n\t}\n\treturn bootstrap.NewProcessProvider(bs, providerOpts, rsOpts, fsOpts)\n}", "func NewBootstrapCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {\n\tbootstrapRootCmd := &cobra.Command{\n\t\tUse: \"bootstrap\",\n\t\tShort: \"Bootstrap ephemeral Kubernetes cluster\",\n\t}\n\n\tISOGenCmd := NewISOGenCommand(bootstrapRootCmd, rootSettings)\n\tbootstrapRootCmd.AddCommand(ISOGenCmd)\n\n\tremoteDirectCmd := NewRemoteDirectCommand(rootSettings)\n\tbootstrapRootCmd.AddCommand(remoteDirectCmd)\n\n\treturn bootstrapRootCmd\n}", "func New() *Spec {\n\treturn &Spec{}\n}", "func NewBoot(opts ...BootOption) *Boot {\n\tboot := &Boot{}\n\n\tfor i := range opts {\n\t\topts[i](boot)\n\t}\n\n\tif len(boot.BootConfigPath) < 1 {\n\t\tboot.BootConfigPath = \"boot.yaml\"\n\t}\n\n\t// Register and bootstrap internal entries with boot config.\n\trkentry.RegisterInternalEntriesFromConfig(boot.BootConfigPath)\n\n\t// Register external entries.\n\tregFuncList := rkentry.ListEntryRegFunc()\n\tfor i := range regFuncList {\n\t\tregFuncList[i](boot.BootConfigPath)\n\t}\n\n\treturn boot\n}", "func bootstrap(withoutDc ...bool) {\n\tvar err error\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\t// ctx\n\t_, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tif framework.TestContext.RepoRoot != \"\" {\n\t\ttestfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot})\n\t}\n\tframework.TestContext.Provider = \"local\"\n}", "func NewProbe(bootstrapPath string) ready.Prober {\n\treturn &probe{bootstrapPath: bootstrapPath}\n}", "func NewBootstrapper(client client.Client, namespace string) component.DeployWaiter {\n\treturn &bootstrapper{\n\t\tclient: client,\n\t\tnamespace: namespace,\n\t}\n}", "func NewBootstrapController(\n\tkubeClient kubernetes.Interface,\n\tklusterletInformer operatorinformer.KlusterletInformer,\n\tsecretInformer coreinformer.SecretInformer,\n\trecorder events.Recorder) factory.Controller {\n\tcontroller := &bootstrapController{\n\t\tkubeClient: kubeClient,\n\t\tklusterletLister: klusterletInformer.Lister(),\n\t\tsecretLister: secretInformer.Lister(),\n\t}\n\treturn factory.New().WithSync(controller.sync).\n\t\tWithInformersQueueKeyFunc(bootstrapSecretQueueKeyFunc(controller.klusterletLister), secretInformer.Informer()).\n\t\tToController(\"BootstrapController\", recorder)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewJobResource initializes a new JSONAPI job resource
func NewJobResource(j job.Job) *JobResource { resource := &JobResource{ JAID: NewJAIDInt32(j.ID), Name: j.Name.ValueOrZero(), Type: JobSpecType(j.Type), SchemaVersion: j.SchemaVersion, GasLimit: j.GasLimit, ForwardingAllowed: j.ForwardingAllowed, MaxTaskDuration: j.MaxTaskDuration, PipelineSpec: NewPipelineSpec(j.PipelineSpec), ExternalJobID: j.ExternalJobID, } switch j.Type { case job.DirectRequest: resource.DirectRequestSpec = NewDirectRequestSpec(j.DirectRequestSpec) case job.FluxMonitor: resource.FluxMonitorSpec = NewFluxMonitorSpec(j.FluxMonitorSpec) case job.Cron: resource.CronSpec = NewCronSpec(j.CronSpec) case job.OffchainReporting: resource.OffChainReportingSpec = NewOffChainReportingSpec(j.OCROracleSpec) case job.OffchainReporting2: resource.OffChainReporting2Spec = NewOffChainReporting2Spec(j.OCR2OracleSpec) case job.Keeper: resource.KeeperSpec = NewKeeperSpec(j.KeeperSpec) case job.VRF: resource.VRFSpec = NewVRFSpec(j.VRFSpec) case job.Webhook: resource.WebhookSpec = NewWebhookSpec(j.WebhookSpec) case job.BlockhashStore: resource.BlockhashStoreSpec = NewBlockhashStoreSpec(j.BlockhashStoreSpec) case job.BlockHeaderFeeder: resource.BlockHeaderFeederSpec = NewBlockHeaderFeederSpec(j.BlockHeaderFeederSpec) case job.Bootstrap: resource.BootstrapSpec = NewBootstrapSpec(j.BootstrapSpec) case job.Gateway: resource.GatewaySpec = NewGatewaySpec(j.GatewaySpec) } jes := []JobError{} for _, e := range j.JobSpecErrors { jes = append(jes, NewJobError((e))) } resource.Errors = jes return resource }
[ "func (c *Controller) NewJob() (*Job, error) {\n var job Job\n if err := job.Init(strconv.Itoa(c.NextJobId), c.NamePrefix); err != nil {\n return nil, err\n }\n\n c.NextJobId += 1\n return &job, nil\n}", "func (a *API) newJob(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// load the form to create a new job\n\tj := &job.Job{}\n\terr := renderPage(w, \"jobForm.html\", j)\n\tif err != nil {\n\t\tsendErrorMessage(w)\n\t}\n}", "func NewJob(configFile, currentFile, command, image, resources, namespace, container string, timeout time.Duration) (*Job, error) {\n\tif len(configFile) == 0 {\n\t\treturn nil, errors.New(\"Config file is required\")\n\t}\n\tif len(currentFile) == 0 {\n\t\treturn nil, errors.New(\"Template file is required\")\n\t}\n\tif len(container) == 0 {\n\t\treturn nil, errors.New(\"Container is required\")\n\t}\n\tvar resourceRequirements corev1.ResourceRequirements\n\tif len(resources) != 0 {\n\t\tif err := json.Unmarshal([]byte(resources), &resourceRequirements); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tclient, err := newClient(os.ExpandEnv(configFile))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdownloaded, err := downloadFile(currentFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbytes, err := ioutil.ReadFile(downloaded)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar currentJob v1.Job\n\terr = yaml.Unmarshal(bytes, &currentJob)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcurrentJob.SetName(generateRandomName(currentJob.Name))\n\tif len(namespace) > 0 {\n\t\tcurrentJob.SetNamespace(namespace)\n\t}\n\tp := shellwords.NewParser()\n\targs, err := p.Parse(command)\n\tlog.Info(\"Received args:\")\n\tfor _, arg := range args {\n\t\tlog.Info(arg)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Job{\n\t\tclient,\n\t\t&currentJob,\n\t\targs,\n\t\timage,\n\t\tresourceRequirements,\n\t\tnamespace,\n\t\tcontainer,\n\t\ttimeout,\n\t}, nil\n}", "func NewJobResource(j job.Job) *JobResource {\n\tresource := &JobResource{\n\t\tJAID: NewJAIDInt32(j.ID),\n\t\tName: j.Name.ValueOrZero(),\n\t\tType: JobSpecType(j.Type),\n\t\tSchemaVersion: j.SchemaVersion,\n\t\tMaxTaskDuration: j.MaxTaskDuration,\n\t\tPipelineSpec: NewPipelineSpec(j.PipelineSpec),\n\t}\n\n\tswitch j.Type {\n\tcase job.DirectRequest:\n\t\tresource.DirectRequestSpec = NewDirectRequestSpec(j.DirectRequestSpec)\n\tcase job.FluxMonitor:\n\t\tresource.FluxMonitorSpec = NewFluxMonitorSpec(j.FluxMonitorSpec)\n\tcase job.OffchainReporting:\n\t\tresource.OffChainReportingSpec = NewOffChainReportingSpec(j.OffchainreportingOracleSpec)\n\tcase job.Keeper:\n\t\tresource.KeeperSpec = NewKeeperSpec(j.KeeperSpec)\n\t}\n\n\tjes := []JobError{}\n\tfor _, e := range j.JobSpecErrors {\n\t\tjes = append(jes, NewJobError((e)))\n\t}\n\tresource.Errors = jes\n\n\treturn resource\n}", "func NewJob(payload []byte) (job Job, err error) {\n err = json.Unmarshal(payload, &job)\n return\n}", "func NewJob(ctx *pulumi.Context,\n\tname string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.JobCollectionName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'JobCollectionName'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:scheduler/v20160301:Job\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:scheduler:Job\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:scheduler:Job\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:scheduler/v20140801preview:Job\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:scheduler/v20140801preview:Job\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:scheduler/v20160101:Job\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:scheduler/v20160101:Job\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource Job\n\terr := ctx.RegisterResource(\"azure-native:scheduler/v20160301:Job\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewJob(ctx *pulumi.Context,\n\tname string, args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error) {\n\tif args == nil {\n\t\targs = &JobArgs{}\n\t}\n\n\treplaceOnChanges := pulumi.ReplaceOnChanges([]string{\n\t\t\"location\",\n\t\t\"project\",\n\t})\n\topts = append(opts, replaceOnChanges)\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Job\n\terr := ctx.RegisterResource(\"google-native:dataflow/v1b3:Job\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (a *API) createJob(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\t// get request body\n\tvar j job.Job\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tsendErrorMessage(w)\n\t}\n\n\t// create a new job object (j) based on the input body\n\terr = json.Unmarshal(body, &j)\n\tif err != nil {\n\t\tsendErrorMessage(w)\n\t}\n\ta.jobstore.Create(&j)\n\tjob, err := json.Marshal(j)\n\tif err != nil {\n\t\tsendErrorMessage(w)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(job)\n}", "func (c *Client) CreateNewJob(job *Job) error {\n\tbody, err := json.Marshal(job)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn err\n\t}\n\n\turl := fmt.Sprintf(\"http://%s/api/v1/jobs\", c.options.ServerAddr)\n\tresp, err := http.Post(url, \"application/json\", bytes.NewReader(body))\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t}\n\treturn err\n}", "func (env *Env) Create_job(writer http.ResponseWriter, req *http.Request) {\n\tjob := new(models.Job)\n\n\terr := json.NewDecoder(req.Body).Decode(job)\n\tif err != nil {\n\t\trespond(writer, \"Your format sucked\", &models.My_error{err.Error(), http.StatusBadRequest})\n\t\treturn\n\t}\n\n\tjob, err = models.Store_job(env.Db, job, false)\n\tif err != nil {\n\t\trespond(writer,\n\t\t\t\"Unable to store job\",\n\t\t\t&models.My_error{err.Error(), http.StatusInternalServerError})\n\t} else {\n\t\trespond(writer, job, nil)\n\t}\n}", "func NewJob(data []byte) Job {\n\tuuid := uuid.New()\n\treturn Job{ID: uuid.String(), Data: data}\n}", "func (j existingJob) Create() error {\n\treturn createExistingResource(j)\n}", "func newJobJob(dbJob *models.Job) (j *JobJob, err error) {\n\tj = &JobJob{Job: NewJob(dbJob)}\n\tj.TargetJob, err = models.FindJob(dbJob.ObjectID)\n\tif err != nil {\n\t\treturn j, err\n\t}\n\tif j.TargetJob == nil {\n\t\treturn j, fmt.Errorf(\"job id %d does not exist\", dbJob.ObjectID)\n\t}\n\treturn j, err\n}", "func (c *client) startNewJob(ctx context.Context, opts launcher.LaunchOptions, jobInterface 
v12.JobInterface, ns string, safeName string, safeSha string) ([]runtime.Object, error) {\n\tlog.Logger().Infof(\"about to create a new job for name %s and sha %s\", safeName, safeSha)\n\n\t// lets see if we are using a version stream to store the git operator configuration\n\tfolder := filepath.Join(opts.Dir, \"versionStream\", \"git-operator\")\n\texists, err := files.DirExists(folder)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if folder exists %s\", folder)\n\t}\n\tif !exists {\n\t\t// lets try the original location\n\t\tfolder = filepath.Join(opts.Dir, \".jx\", \"git-operator\")\n\t}\n\n\tjobFileName := \"job.yaml\"\n\n\tfileNamePath := filepath.Join(opts.Dir, \".jx\", \"git-operator\", \"filename.txt\")\n\texists, err = files.FileExists(fileNamePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check for file %s\", fileNamePath)\n\t}\n\tif exists {\n\t\tdata, err := ioutil.ReadFile(fileNamePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load file %s\", fileNamePath)\n\t\t}\n\t\tjobFileName = strings.TrimSpace(string(data))\n\t\tif jobFileName == \"\" {\n\t\t\treturn nil, errors.Errorf(\"the job name file %s is empty\", fileNamePath)\n\t\t}\n\t}\n\n\tfileName := filepath.Join(folder, jobFileName)\n\texists, err = files.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find file %s in repository %s\", fileName, safeName)\n\t}\n\tif !exists {\n\t\treturn nil, errors.Errorf(\"repository %s does not have a Job file: %s\", safeName, fileName)\n\t}\n\n\tresource := &v1.Job{}\n\terr = yamls.LoadFile(fileName, resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Job file %s in repository %s\", fileName, safeName)\n\t}\n\n\tif !opts.NoResourceApply {\n\t\t// now lets check if there is a resources dir\n\t\tresourcesDir := filepath.Join(folder, \"resources\")\n\t\texists, err = files.DirExists(resourcesDir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to check if resources directory %s exists in repository %s\", resourcesDir, safeName)\n\t\t}\n\t\tif exists {\n\t\t\tabsDir, err := filepath.Abs(resourcesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute resources dir %s\", resourcesDir)\n\t\t\t}\n\n\t\t\tcmd := &cmdrunner.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", \"-f\", absDir},\n\t\t\t}\n\t\t\tlog.Logger().Infof(\"running command: %s\", cmd.CLI())\n\t\t\t_, err = c.runner(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to apply resources in dir %s\", absDir)\n\t\t\t}\n\t\t}\n\t}\n\n\t// lets try use a maximum of 31 characters and a minimum of 10 for the sha\n\tnamePrefix := trimLength(safeName, 20)\n\n\tid := uuid.New().String()\n\tresourceName := namePrefix + \"-\" + id\n\n\tresource.Name = resourceName\n\n\tif resource.Labels == nil {\n\t\tresource.Labels = map[string]string{}\n\t}\n\tresource.Labels[constants.DefaultSelectorKey] = constants.DefaultSelectorValue\n\tresource.Labels[launcher.RepositoryLabelKey] = safeName\n\tresource.Labels[launcher.CommitShaLabelKey] = safeSha\n\n\tr2, err := jobInterface.Create(ctx, resource, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create Job %s in namespace %s\", resourceName, ns)\n\t}\n\tlog.Logger().Infof(\"created Job %s in namespace %s\", resourceName, ns)\n\treturn []runtime.Object{r2}, nil\n}", "func NewJob(ctx *pulumi.Context,\n\tname string, 
args *JobArgs, opts ...pulumi.ResourceOption) (*Job, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.RoleArn == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'RoleArn'\")\n\t}\n\tif args.Type == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Type'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Job\n\terr := ctx.RegisterResource(\"aws-native:databrew:Job\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewJob(arrTime int) (j *Job) {\n\tj = new(Job)\n\tj.IntAttrs = make(map[string]int)\n\tj.StrAttrs = make(map[string]string)\n\tj.JobId = rand.Int63()\n\tj.ArrTime = arrTime\n\treturn j\n}", "func (m *Master) constructJobResources(c *Config, restStorage map[string]rest.Storage) {\n\t// Note that job's storage settings are changed by changing the batch\n\t// group. Clearly we want all jobs to be stored in the same place no\n\t// matter where they're accessed from.\n\trestOptions := func(resource string) generic.RESTOptions {\n\t\treturn generic.RESTOptions{\n\t\t\tStorage: c.StorageDestinations.Search([]string{batch.GroupName, extensions.GroupName}, resource),\n\t\t\tDecorator: m.StorageDecorator(),\n\t\t\tDeleteCollectionWorkers: m.deleteCollectionWorkers,\n\t\t}\n\t}\n\tjobStorage, jobStatusStorage := jobetcd.NewREST(restOptions(\"jobs\"))\n\trestStorage[\"jobs\"] = jobStorage\n\trestStorage[\"jobs/status\"] = jobStatusStorage\n}", "func (r *realKubeClient) CreateJob(job *Job) error {\n\turl := \"/apis/extensions/v1beta1/namespaces/\" + job.Metadata[\"namespace\"].(string) + \"/jobs\"\n\tdata, err := json.Marshal(job)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbyteData := bytes.NewReader(data)\n\treturn r.doPost(url, byteData)\n\n}", "func NewJobResources(js []job.Job) []JobResource {\n\trs := []JobResource{}\n\n\tfor _, j := range js {\n\t\trs = append(rs, *NewJobResource(j))\n\t}\n\n\treturn rs\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithGroupIDConsumerOption provides an option to modify the GroupID for a consumer group
func WithGroupIDConsumerOption(groupID string) ConsumerOption { return func(c *Consumer) { c.config.GroupID = groupID } }
[ "func WithConsumerGroupID(groupID string) ConfigOpt {\n\treturn func(c *kafkalib.ConfigMap) {\n\t\t_ = c.SetKey(\"group.id\", groupID)\n\t}\n}", "func ConsumerSetGroupID(groupID string) model.Option {\n\treturn model.FuncOption(func(d *model.Dispatcher) { d.ConsumerGroupID = groupID })\n}", "func DeliverGroup(g string) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.DeliverGroup = g\n\t\treturn nil\n\t}\n}", "func applyServerGroupID(opts servers.CreateOptsBuilder, serverGroupID string) servers.CreateOptsBuilder {\n\tif serverGroupID != \"\" {\n\t\treturn schedulerhints.CreateOptsExt{\n\t\t\tCreateOptsBuilder: opts,\n\t\t\tSchedulerHints: schedulerhints.SchedulerHints{\n\t\t\t\tGroup: serverGroupID,\n\t\t\t},\n\t\t}\n\t}\n\treturn opts\n}", "func (c ConsumerConfig) Apply(kafkaConf *kafkalib.ConfigMap) {\n\tif id := c.GroupID; id != \"\" {\n\t\t_ = kafkaConf.SetKey(\"group.id\", id)\n\t}\n}", "func WithGroup(group guid.GUID) ProviderOpt {\n\treturn func(opts *providerOpts) {\n\t\topts.group = group\n\t}\n}", "func ConsumerID(id string) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\tif id == \"\" {\n\t\t\treturn errors.New(\"invalid consumer id\")\n\t\t}\n\t\to.ConsumerID = id\n\t\treturn nil\n\t}\n}", "func WithGroupID(groupID uint32) DescriptorSelectorFunc {\n\treturn func(d Descriptor) (bool, error) {\n\t\tif groupID == 0 {\n\t\t\treturn false, ErrInvalidGroupID\n\t\t}\n\t\treturn d.GetGroupID() == groupID, nil\n\t}\n}", "func (cl *Client) AssignGroup(group string, opts ...GroupOpt) {\n\tc := &cl.consumer\n\n\tc.assignMu.Lock()\n\tdefer c.assignMu.Unlock()\n\n\tif wasDead := c.unsetAndWait(); wasDead {\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithCancel(cl.ctx)\n\tg := &groupConsumer{\n\t\tc: c,\n\t\tcl: cl,\n\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tmanageDone: make(chan struct{}),\n\n\t\tid: group,\n\n\t\tbalancers: []GroupBalancer{\n\t\t\tCooperativeStickyBalancer(),\n\t\t},\n\t\tprotocol: \"consumer\",\n\t\tcooperative: true, // default yes, potentially canceled below by our balancers\n\n\t\ttps: newTopicsPartitions(),\n\n\t\tusing: make(map[string]int),\n\t\trejoinCh: make(chan struct{}, 1),\n\t\theartbeatForceCh: make(chan func(error)),\n\t\treSeen: make(map[string]struct{}),\n\n\t\tsessionTimeout: 45000 * time.Millisecond,\n\t\trebalanceTimeout: 60000 * time.Millisecond,\n\t\theartbeatInterval: 3000 * time.Millisecond,\n\n\t\tautocommitInterval: 5 * time.Second,\n\t}\n\tif c.cl.cfg.txnID == nil {\n\t\tg.onRevoked = g.defaultRevoke\n\t\t// We do not want to commit in onLost, so we explicitly set\n\t\t// onLost to an empty function to avoid the fallback to\n\t\t// onRevoked.\n\t\tg.onLost = func(context.Context, map[string][]int32) {}\n\t} else {\n\t\tg.autocommitDisable = true\n\t}\n\tfor _, opt := range opts {\n\t\topt.apply(g)\n\t}\n\tif len(group) == 0 || len(g.topics) == 0 || c.dead {\n\t\treturn\n\t}\n\n\tif g.commitCallback == nil {\n\t\tg.commitCallback = g.defaultCommitCallback\n\t}\n\n\tdefer c.storeGroup(g)\n\tdefer cl.triggerUpdateMetadata(true) // we definitely want to trigger a metadata update\n\n\tfor _, balancer := range g.balancers {\n\t\tg.cooperative = g.cooperative && balancer.IsCooperative()\n\t}\n\n\t// Ensure all topics exist so that we will fetch their metadata.\n\tif !g.regexTopics {\n\t\ttopics := make([]string, 0, len(g.topics))\n\t\tfor topic := range g.topics {\n\t\t\ttopics = append(topics, topic)\n\t\t}\n\t\tg.tps.storeTopics(topics)\n\t}\n\n\tif !g.autocommitDisable && g.autocommitInterval > 0 
{\n\t\tg.cl.cfg.logger.Log(LogLevelInfo, \"beginning autocommit loop\")\n\t\tgo g.loopCommit()\n\t}\n}", "func NewDeleteConsumerGroupCommand(f *factory.Factory) *cobra.Command {\n\topts := &Options{\n\t\tConnection: f.Connection,\n\t\tCfgHandler: f.CfgHandler,\n\t\tIO: f.IOStreams,\n\t\tLogger: f.Logger,\n\t\tlocalizer: f.Localizer,\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.use\"),\n\t\tShort: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.shortDescription\"),\n\t\tLong: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.longDescription\"),\n\t\tExample: opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.cmd.example\"),\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\t\t\tif opts.kafkaID != \"\" {\n\t\t\t\treturn runCmd(opts)\n\t\t\t}\n\n\t\t\tif !f.CfgHandler.Cfg.HasKafka() {\n\t\t\t\treturn errors.New(opts.localizer.LocalizeByID(\"kafka.consumerGroup.common.error.noKafkaSelected\"))\n\t\t\t}\n\n\t\t\topts.kafkaID = opts.CfgHandler.Cfg.Services.Kafka.ClusterID\n\n\t\t\treturn runCmd(opts)\n\t\t},\n\t}\n\n\topts.localizer.LocalizeByID(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"delete\"))\n\tcmd.Flags().BoolVarP(&opts.skipConfirm, \"yes\", \"y\", false, opts.localizer.LocalizeByID(\"kafka.consumerGroup.delete.flag.yes.description\"))\n\tcmd.Flags().StringVar(&opts.id, \"id\", \"\", opts.localizer.LocalizeByID(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"delete\")))\n\t_ = cmd.MarkFlagRequired(\"id\")\n\n\t// flag based completions for ID\n\t_ = cmd.RegisterFlagCompletionFunc(\"id\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn cmdutil.FilterValidConsumerGroupIDs(f, toComplete)\n\t})\n\n\treturn cmd\n}", "func WithGroupID(groupID uint32) DescriptorSelectorFunc {\n\treturn func(d Descriptor) (bool, error) {\n\t\tif groupID == 0 {\n\t\t\treturn false, ErrInvalidGroupID\n\t\t}\n\t\treturn d.GroupID() == groupID, nil\n\t}\n}", "func (o *GroupV2AddOptionalConversationParams) SetGroupID(groupID int64) {\n\to.GroupID = groupID\n}", "func (r *Replicator) UpdateConsumerGroup(ctx thrift.Context, updateRequest *shared.UpdateConsumerGroupRequest) (*shared.ConsumerGroupDescription, error) {\n\tr.m3Client.IncCounter(metrics.ReplicatorUpdateCgScope, metrics.ReplicatorRequests)\n\n\tcgDesc, err := r.metaClient.UpdateConsumerGroup(ctx, updateRequest)\n\tif err != nil {\n\t\tr.logger.WithFields(bark.Fields{\n\t\t\tcommon.TagCnsPth: common.FmtCnsPth(updateRequest.GetConsumerGroupName()),\n\t\t\tcommon.TagDstPth: common.FmtDstPth(updateRequest.GetDestinationPath()),\n\t\t\tcommon.TagDst: common.FmtDst(cgDesc.GetDestinationUUID()),\n\t\t\tcommon.TagErr: err,\n\t\t}).Error(`Error updating cg`)\n\t\tr.m3Client.IncCounter(metrics.ReplicatorUpdateCgScope, metrics.ReplicatorFailures)\n\t\treturn nil, err\n\t}\n\n\tr.logger.WithFields(bark.Fields{\n\t\tcommon.TagCnsPth: common.FmtCnsPth(updateRequest.GetConsumerGroupName()),\n\t\tcommon.TagCnsm: common.FmtCnsm(cgDesc.GetConsumerGroupUUID()),\n\t\tcommon.TagDstPth: common.FmtDstPth(updateRequest.GetDestinationPath()),\n\t\tcommon.TagDst: common.FmtDst(cgDesc.GetDestinationUUID()),\n\t\tcommon.TagDLQID: common.FmtDLQID(cgDesc.GetDeadLetterQueueDestinationUUID()),\n\t\t`IsMultiZone`: cgDesc.GetIsMultiZone(),\n\t\t`ActiveZone`: cgDesc.GetActiveZone(),\n\t}).Info(`Updated cg`)\n\treturn cgDesc, 
nil\n}", "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func (o ConsumerGroupOutput) ConsumerId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ConsumerGroup) pulumi.StringOutput { return v.ConsumerId }).(pulumi.StringOutput)\n}", "func withGroupID(id int) groupOption {\n\treturn func(m *GroupMutation) {\n\t\tvar (\n\t\t\terr error\n\t\t\tonce sync.Once\n\t\t\tvalue *Group\n\t\t)\n\t\tm.oldValue = func(ctx context.Context) (*Group, error) {\n\t\t\tonce.Do(func() {\n\t\t\t\tif m.done {\n\t\t\t\t\terr = fmt.Errorf(\"querying old values post mutation is not allowed\")\n\t\t\t\t} else {\n\t\t\t\t\tvalue, err = m.Client().Group.Get(ctx, id)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn value, err\n\t\t}\n\t\tm.id = &id\n\t}\n}", "func (ec *EventCreate) SetGroupID(id int) *EventCreate {\n\tec.mutation.SetGroupID(id)\n\treturn ec\n}", "func (m *kafkaConsumerGroupManagerImpl) StartConsumerGroup(ctx context.Context, groupId string, topics []string, handler KafkaConsumerHandler, ref types.NamespacedName, options ...SaramaConsumerHandlerOption) error {\n\tlogger := logging.FromContext(ctx)\n\n\tgroupLogger := m.logger.With(zap.String(\"GroupId\", groupId))\n\tgroupLogger.Info(\"Creating New Managed ConsumerGroup\")\n\tgroup, err := m.factory.createConsumerGroup(groupId)\n\tif err != nil {\n\t\tgroupLogger.Error(\"Failed To Create New Managed ConsumerGroup\")\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// consume is passed in to the KafkaConsumerGroupFactory so that it will call the manager's\n\t// consume() function instead of the one on the internal sarama ConsumerGroup. This allows the\n\t// manager to continue to block in the Consume call while a group goes through a stop/start cycle.\n\tconsume := func(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error {\n\t\tlogger.Debug(\"Consuming Messages On managed Consumer Group\", zap.String(\"GroupId\", groupId))\n\t\treturn m.consume(ctx, groupId, topics, handler)\n\t}\n\n\t// The only thing we really want from the factory is the cancel function for the customConsumerGroup\n\tcustomGroup := m.factory.startExistingConsumerGroup(groupId, group, consume, topics, logger, handler, ref, options...)\n\tmanagedGrp := createManagedGroup(ctx, m.logger, group, cancel, customGroup.cancel)\n\n\t// Add the Sarama ConsumerGroup we obtained from the factory to the managed group map,\n\t// so that it can be stopped and started via control-protocol messages.\n\tm.setGroup(groupId, managedGrp)\n\tm.notify(ManagerEvent{Event: GroupCreated, GroupId: groupId})\n\treturn nil\n}", "func (o EventSourceMappingAmazonManagedKafkaEventSourceConfigOutput) ConsumerGroupId() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EventSourceMappingAmazonManagedKafkaEventSourceConfig) *string { return v.ConsumerGroupId }).(pulumi.StringPtrOutput)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithTopicConsumerOption provides an option to modify the topic the Consumer will listen to
func WithTopicConsumerOption(topic string) ConsumerOption { return func(c *Consumer) { c.config.Topic = topic } }
[ "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func (c *Consumer) SetTopic(topic string) *Consumer {\n\tif topic != \"\" {\n\t\tc.mutex.Lock()\n\t\tc.bind.SetKey(topic)\n\t\tc.mutex.Unlock()\n\t\tc.SetQueueName(true, \"\")\n\t\tc.SetChannelKey(true, \"\")\n\t}\n\treturn c\n}", "func WithTopic(ctx context.Context, topic string) context.Context {\n\treturn context.WithValue(ctx, topicKey{}, topic)\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (conn *ProtoConnection) ConsumeTopic(msgClb func(messaging.ProtoMessage), topics ...string) error {\n\tconn.multiplexer.rwlock.Lock()\n\tdefer conn.multiplexer.rwlock.Unlock()\n\n\tif conn.multiplexer.started {\n\t\treturn fmt.Errorf(\"ConsumeTopic can be called only if the multiplexer has not been started yet\")\n\t}\n\n\tbyteClb := func(bm *client.ConsumerMessage) {\n\t\tpm := client.NewProtoConsumerMessage(bm, conn.serializer)\n\t\tmsgClb(pm)\n\t}\n\n\tfor _, topic := range topics {\n\t\t// check if we have already consumed the topic\n\t\tvar found bool\n\t\tvar subs *consumerSubscription\n\tLoopSubs:\n\t\tfor _, subscription := range conn.multiplexer.mapping {\n\t\t\tif subscription.manual == true {\n\t\t\t\t// do not mix dynamic and manual mode\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif subscription.topic == topic {\n\t\t\t\tfound = true\n\t\t\t\tsubs = subscription\n\t\t\t\tbreak LoopSubs\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\tsubs = &consumerSubscription{\n\t\t\t\tmanual: false, // non-manual example\n\t\t\t\ttopic: topic,\n\t\t\t\tconnectionName: conn.name,\n\t\t\t\tbyteConsMsg: byteClb,\n\t\t\t}\n\t\t\t// subscribe new topic\n\t\t\tconn.multiplexer.mapping = append(conn.multiplexer.mapping, subs)\n\t\t}\n\n\t\t// add subscription to consumerList\n\t\tsubs.byteConsMsg = byteClb\n\t}\n\n\treturn nil\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func WithUpdateTopicConfig(cfg pubsub.TopicConfigToUpdate) Option {\n\treturn func(c *Config) {\n\t\tc.TopicConfig = &cfg\n\t}\n}", "func WithTopicOptions(topicConfig *pubsub.TopicConfig) Option {\n\treturn func(b *EventBus) error {\n\t\tb.topicConfig = topicConfig\n\n\t\treturn nil\n\t}\n}", "func WithTopicPrefix(prefix string) Option {\n\treturn topicPrefix(prefix)\n}", "func WithTopicPrefix(topicPrefix string) Option {\n\treturn func(c *queue) {\n\t\tc.topicPrefix = topicPrefix\n\t}\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func WithTopics(topics []string) manifest.CfgFn {\n\treturn func(cfg map[string]interface{}) {\n\t\tif topics != nil {\n\t\t\tcfg[\"topics\"] = topics\n\t\t}\n\t}\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == 
nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func (conn *ProtoConnection) ConsumeTopicOnPartition(msgClb func(messaging.ProtoMessage), topic string, partition int32, offset int64) error {\n\tconn.multiplexer.rwlock.Lock()\n\tdefer conn.multiplexer.rwlock.Unlock()\n\n\tif conn.multiplexer.started {\n\t\treturn fmt.Errorf(\"ConsumeTopicOnPartition can be called only if the multiplexer has not been started yet\")\n\t}\n\n\tbyteClb := func(bm *client.ConsumerMessage) {\n\t\tpm := client.NewProtoConsumerMessage(bm, conn.serializer)\n\t\tmsgClb(pm)\n\t}\n\n\t// check if we have already consumed the topic on partition and offset\n\tvar found bool\n\tvar subs *consumerSubscription\n\n\tfor _, subscription := range conn.multiplexer.mapping {\n\t\tif subscription.manual == false {\n\t\t\t// do not mix dynamic and manual mode\n\t\t\tcontinue\n\t\t}\n\t\tif subscription.topic == topic && subscription.partition == partition && subscription.offset == offset {\n\t\t\tfound = true\n\t\t\tsubs = subscription\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\tsubs = &consumerSubscription{\n\t\t\tmanual: true, // manual example\n\t\t\ttopic: 
topic,\n\t\t\tpartition: partition,\n\t\t\toffset: offset,\n\t\t\tconnectionName: conn.name,\n\t\t\tbyteConsMsg: byteClb,\n\t\t}\n\t\t// subscribe new topic on partition\n\t\tconn.multiplexer.mapping = append(conn.multiplexer.mapping, subs)\n\t}\n\n\t// add subscription to consumerList\n\tsubs.byteConsMsg = byteClb\n\n\treturn nil\n}", "func (b *addPushNotificationsOnChannelsBuilder) Topic(topic string) *addPushNotificationsOnChannelsBuilder {\n\tb.opts.Topic = topic\n\treturn b\n}", "func (c *ConsumerManager) AddConsumer(topic, channel string, client ConsumerClient) error {\n\n}", "func WithConsumerLogger(logger log.Logger) ConsumerOption {\n\treturn func(consumer *Consumer) {\n\t\tconsumer.logger = logger\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithMaxMinByteConsumerOption provides an option to modify the min/max number of bytes the consumer can fetch from Kafka in a single request
func WithMaxMinByteConsumerOption(min, max int) ConsumerOption { return func(c *Consumer) { c.config.MinBytes = min c.config.MaxBytes = max } }
[ "func (o *consumer) setMaxPendingBytes(limit int) {\n\to.pblimit = limit\n\to.maxpb = limit / 16\n\tif o.maxpb == 0 {\n\t\to.maxpb = 1\n\t}\n}", "func MaxRequestMaxBytes(max int) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestMaxBytes = max\n\t\treturn nil\n\t}\n}", "func WithMaxInflightBytes(n int) WriterOption {\n\treturn func(ms *ManagedStream) {\n\t\tms.streamSettings.MaxInflightBytes = n\n\t}\n}", "func MaxRequestBatch(max uint) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.MaxRequestBatch = int(max)\n\t\treturn nil\n\t}\n}", "func WithEventConsumerBufferSize(value uint) options.Opt {\n\treturn func(p options.Params) {\n\t\tif setter, ok := p.(eventConsumerBufferSizeSetter); ok {\n\t\t\tsetter.SetEventConsumerBufferSize(value)\n\t\t}\n\t}\n}", "func WithMaxBatchSizeBytes(n int64) WriteHandlerOption {\n\treturn func(w *WriteHandler) {\n\t\tw.maxBatchSizeBytes = n\n\t}\n}", "func WithMaxInboxSize(size int) ConvergerOpt {\n\treturn func(converger *converger) {\n\t\tconverger.inbox = make(chan convergeRequest, size)\n\t}\n}", "func MaxMessageSize(size int64) Option {\n\tif size < 0 {\n\t\tpanic(\"size must be non-negative\")\n\t}\n\treturn func(ws *websocket) {\n\t\tws.options.maxMessageSize = size\n\t}\n}", "func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) }", "func (m *MailTips) SetMaxMessageSize(value *int32)() {\n err := m.GetBackingStore().Set(\"maxMessageSize\", value)\n if err != nil {\n panic(err)\n }\n}", "func WithMaxBytes(maxBytes int) LimitedWriterOption {\n\tbytesWritten := 0\n\treturn func(w io.WriteCloser) io.WriteCloser {\n\t\tpreCheck := NewPreWriteCallbacks(w, func(p []byte) error {\n\t\t\tif bytesWritten+len(p) > maxBytes {\n\t\t\t\tif err := w.Close(); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"failed to close WriteCloser writing maxBytes; Close error was: %w\", err)\n\t\t\t\t}\n\t\t\t\treturn ErrTooLargeWrite\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\treturn NewPostWriteCallbacks(preCheck, func(p []byte, n int, err error) {\n\t\t\tbytesWritten += n\n\t\t})\n\t}\n}", "func MaxMsgSize(n int) Option {\n\treturn func(o *Options) {\n\t\to.MaxMsgSize = n\n\t}\n}", "func WithMaxChunkSize(size string) Option {\n\treturn func(c *client) error {\n\t\tb, err := unit.ParseBytes(size)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif int64(b) >= s3manager.MinUploadPartSize {\n\t\t\tc.maxChunkSize = int64(b)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (formatter *Formatter) SetMinBufSize(size int) {\n\tformatter.lock.Lock()\n\tdefer formatter.lock.Unlock()\n\n\tif size == 0 {\n\t\tformatter.minBufSize = 256\n\t} else {\n\t\tformatter.minBufSize = size\n\t}\n}", "func SetBufMaxSize(maxSize int) BufWriterOpt {\n\treturn func(w *BufWriter) {\n\t\tif maxSize <= defaultBufMaxSize {\n\t\t\tw.maxSize = defaultBufMaxSize\n\t\t\treturn\n\t\t}\n\t\tw.maxSize = maxSize\n\t}\n}", "func MaxPktSize(size int) CodecOpt {\n\treturn func(c *Codec) {\n\t\tif size > maxPktSize || size <= 0 {\n\t\t\tc.maxPktSize = maxPktSize\n\t\t}\n\t\tc.maxPktSize = size\n\t}\n}", "func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) }", "func StreamQueryFewerConsumersThan(c uint) StreamQueryOpt {\n\treturn func(q *streamQuery) error {\n\t\ti := int(c)\n\t\tq.consumersLimit = &i\n\t\treturn nil\n\t}\n}", "func MaxHeaderBytes(v int) Option {\n\treturn optionSetter(func(opt *Options) {\n\t\topt.MaxHeaderBytes = v\n\t})\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAutoCommitConsumerOption sets the autocommit property of the consumer
func WithAutoCommitConsumerOption(flag bool) ConsumerOption { return func(c *Consumer) { c.autocommit = flag } }
[ "func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption {\n\treturn func(c *Consumer) { c.config.CommitInterval = dur }\n}", "func AutoAck() ConsumerOpt {\n\treturn func(c *Consumer) {\n\t\tc.autoAck = true\n\t}\n}", "func AutoCommitInterval(interval time.Duration) GroupOpt {\n\treturn groupOpt{func(cfg *groupConsumer) { cfg.autocommitInterval = interval }}\n}", "func (q *Query) autoCommit() {\n\tif q.AutoCommit {\n\t\t_ = q.Tx.Commit()\n\t\tq.Tx = nil\n\t}\n}", "func CommitSync() OptionFunc {\n\treturn func(c *Component) error {\n\t\tif c.saramaConfig != nil && c.saramaConfig.Consumer.Offsets.AutoCommit.Enable {\n\t\t\t// redundant commits warning\n\t\t\tlog.Warn(\"consumer is set to commit offsets after processing each batch and auto-commit is enabled\")\n\t\t}\n\t\tc.commitSync = true\n\t\treturn nil\n\t}\n}", "func DisableAutoCommit() GroupOpt {\n\treturn groupOpt{func(cfg *groupConsumer) { cfg.autocommitDisable = true }}\n}", "func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\tconsumerTest(t, testname+\" auto commit\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitMessage() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitOffsets() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using Commit() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb)\n\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func AutoCheckpoint(interval time.Duration) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoCheckpointInterval = interval\n\t\treturn nil\n\t}\n}", "func AutoOffsetNone() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetNone\n\t\treturn nil\n\t}\n}", "func AutoOffsetLatest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetLatest\n\t\treturn nil\n\t}\n}", "func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func AutoAck(opt bool) Option {\n\treturn func(o *Options) {\n\t\to.AutoAck = opt\n\t}\n}", "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func AutoTag() ConsumerOpt {\n\treturn func(c *Consumer) {\n\t\thost, _ := os.Hostname()\n\t\ttag := fmt.Sprintf(c.q.Name+\"-pid-%d@%s\", os.Getpid(), host)\n\t\tTag(tag)(c)\n\t}\n}", "func AutoAck(enabled bool) SubscribeOption {\n\treturn func(o *SubscribeOptions) {\n\t\to.AutoAck = enabled\n\t}\n}", "func (c CommitterProbe) SetUseAsyncCommit() {\n\tc.useAsyncCommit = 1\n}", "func CommitCallback(fn 
func(*kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error)) GroupOpt {\n\treturn groupOpt{func(cfg *groupConsumer) { cfg.commitCallback = fn }}\n}", "func AckWait(t time.Duration) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckWait = t\n\t\treturn nil\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAutoCommitTimeConsumerOption sets the auto-commit interval for the Consumer
func WithAutoCommitTimeConsumerOption(dur time.Duration) ConsumerOption { return func(c *Consumer) { c.config.CommitInterval = dur } }
[ "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func AutoCommitInterval(interval time.Duration) GroupOpt {\n\treturn groupOpt{func(cfg *groupConsumer) { cfg.autocommitInterval = interval }}\n}", "func AutoAck() ConsumerOpt {\n\treturn func(c *Consumer) {\n\t\tc.autoAck = true\n\t}\n}", "func AutoCheckpoint(interval time.Duration) ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoCheckpointInterval = interval\n\t\treturn nil\n\t}\n}", "func AckWait(t time.Duration) ConsumerOption {\n\treturn func(o *api.ConsumerConfig) error {\n\t\to.AckWait = t\n\t\treturn nil\n\t}\n}", "func AutoOffsetLatest() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetLatest\n\t\treturn nil\n\t}\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func AutoOffsetNone() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetNone\n\t\treturn nil\n\t}\n}", "func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) {\n\tconsumerTest(t, testname+\" auto commit\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitMessage() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using CommitOffsets() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb)\n\n\tconsumerTest(t, testname+\" using Commit() API\",\n\t\tmsgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb)\n\n}", "func CommitSync() OptionFunc {\n\treturn func(c *Component) error {\n\t\tif c.saramaConfig != nil && c.saramaConfig.Consumer.Offsets.AutoCommit.Enable {\n\t\t\t// redundant commits warning\n\t\t\tlog.Warn(\"consumer is set to commit offsets after processing each batch and auto-commit is enabled\")\n\t\t}\n\t\tc.commitSync = true\n\t\treturn nil\n\t}\n}", "func AutoTag() ConsumerOpt {\n\treturn func(c *Consumer) {\n\t\thost, _ := os.Hostname()\n\t\ttag := fmt.Sprintf(c.q.Name+\"-pid-%d@%s\", os.Getpid(), host)\n\t\tTag(tag)(c)\n\t}\n}", "func (c *Consumer) Commit() error {\n\tsnap := c.resetAcked()\n\tif len(snap) < 1 {\n\t\treturn nil\n\t}\n\n\tfor partitionID, offset := range snap {\n\t\t// fmt.Printf(\"$,%s,%d,%d\\n\", c.id, partitionID, offset+1)\n\t\tif err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {\n\treturn t.Add(cfg.TimeoutCommit)\n}", "func (q *Query) autoCommit() {\n\tif q.AutoCommit {\n\t\t_ = q.Tx.Commit()\n\t\tq.Tx = nil\n\t}\n}", "func (cfg *ConsensusConfiguration) Commit(t time.Time) time.Time {\n\treturn t.Add(cfg.TimeoutCommit)\n}", "func AutoAck(opt bool) Option {\n\treturn func(o *Options) {\n\t\to.AutoAck = opt\n\t}\n}", "func DisableAutoCommit() GroupOpt 
{\n\treturn groupOpt{func(cfg *groupConsumer) { cfg.autocommitDisable = true }}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithDecoderConsumerOption sets the decoder for the Consumer's messages
func WithDecoderConsumerOption(fn Decoder) ConsumerOption { return func(c *Consumer) { c.dec = fn } }
[ "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func WithDecoder(key string, dec func(body io.ReadCloser) (io.ReadCloser, error)) ToServerOption {\n\treturn func(opts *toServerOptions) {\n\t\tif opts.decoders == nil {\n\t\t\topts.decoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){}\n\t\t}\n\t\topts.decoders[key] = dec\n\t}\n}", "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func NewDecoder(options ...DecoderOption) Decoder {\n\td := Decoder{}\n\tfor _, option := range options {\n\t\toption.applyDecoderOption(&d)\n\t}\n\tif d.set == nil {\n\t\td.set = charset.DefaultDecoder()\n\t}\n\tif d.ext == nil {\n\t\td.ext = charset.DefaultExtDecoder()\n\t}\n\treturn d\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func NewConsumer(opt Options) *Consumer {\n\n\treturn &Consumer{\n\t\topt: opt,\n\t}\n\n}", "func NewDecoder(schemaRepository schemaregistry.Repository, options ...option) Decoder {\n\treturn &implDecoder{\n\t\tschemaRepository: schemaRepository,\n\t\tavroAPI: newConfig(options...).Freeze(),\n\t}\n}", "func NewDecoder(opts Options) (*Decoder, error) {\n\tif opts.Reader == nil {\n\t\treturn nil, errors.New(\"Options.Reader can't be nil\")\n\t}\n\treturn &Decoder{\n\t\treader: opts.Reader,\n\t}, nil\n}", "func InitConsumer(conn io.Reader, out io.Writer) *Consumer {\n\tdec := json.NewDecoder(conn)\n\tcons := new(Consumer)\n\tcons.Dec = keyEncoding.NewJsonKeyDecoder(dec)\n\tcons.Out = out\n\treturn cons\n}", "func NewDecoder(opts DecoderOptions) (*Decoder, error) {\n\tvar d Decoder\n\tif err := opts.validate(); err != nil {\n\t\treturn nil, fmt.Errorf(\"imaging: error validating decoder options: %w\", err)\n\t}\n\tif opts.ConcurrencyLevel > 0 {\n\t\td.sem = make(chan struct{}, opts.ConcurrencyLevel)\n\t}\n\td.opts = opts\n\treturn &d, nil\n}", "func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption {\n\treturn func(c *Consumer) { c.end = end }\n}", "func NewDecoder(input io.Reader, options ...DecoderOption) *Decoder {\n\td := &Decoder{\n\t\tInput: input,\n\t\tframer: decoderEndOfMessage,\n\t\tbufSize: defaultReaderBufferSize,\n\t\t// Added this setting of eofOK to true, to avoid 'unexpected EOF' failure (vs. 
standard EOF) being\n\t\t// reported when stream is closed before any data is received.\n\t\teofOK: true,\n\t}\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\td.pr, d.pw = io.Pipe()\n\tif d.s == nil {\n\t\td.s = bufio.NewScanner(input)\n\t\ttmp := make([]byte, d.bufSize)\n\t\td.s.Buffer(tmp, d.bufSize)\n\t}\n\td.s.Split(d.split)\n\treturn d\n}", "func NewDecoder(provider ConfigProvider) *Decoder {\n\td := &Decoder{\n\t\tprovider: provider,\n\t}\n\treturn d\n}", "func CustomCodecOption(codec func() Codec) Option {\n\treturn func(o *options) {\n\t\to.codec = codec\n\t}\n}", "func WithEncoderOption(eo sms.EncoderOption) Option {\n\treturn encoderOption{eo}\n}", "func Decoder(next msg.Receiver) msg.Receiver {\n\treturn msg.ReceiverFunc(func(ctx context.Context, m *msg.Message) error {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t\tif isBase64Encoded(m) {\n\t\t\t\tm.Body = base64.NewDecoder(base64.StdEncoding, m.Body)\n\t\t\t}\n\n\t\t\treturn next.Receive(ctx, m)\n\t\t}\n\t})\n}", "func CustomCodecOption(codec Codec) Option {\n\treturn func(o *options) {\n\t\to.codec = codec\n\t}\n}", "func SetDecoder(format string, decoder Decoder) {\n\tdc.SetDecoder(format, decoder)\n}", "func NewDecoder(f FormatType, r io.Reader) (dec Decoder) {\n\tvar d DecodeProvider = nil\n\n\tswitch f {\n\tcase TomlFormat:\n\t\td = NewTomlDecoder(r)\n\tcase YamlFormat:\n\t\td = NewYamlDecoder(r)\n\tcase JsonFormat:\n\t\td = json.NewDecoder(r)\n\tdefault:\n\t}\n\n\treturn Decoder{Provider: d}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithBeforeFuncsConsumerOption provides a way to set BeforeFunc(s) on the consumer
func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption { return func(c *Consumer) { c.befores = append(c.befores, fns...) } }
[ "func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.afters = append(c.afters, fns...) }\n}", "func (c *callback) AddBeforeInvoke(conf rpc.CallbackFunc) {\n\tf, ok := beforeInvokeRegistry[conf.Name]\n\tif !ok {\n\t\tlog.DefaultLogger.Errorf(\"[runtime][rpc]can't find before filter %s\", conf.Name)\n\t\treturn\n\t}\n\tif err := f.Init(conf.Config); err != nil {\n\t\tlog.DefaultLogger.Errorf(\"[runtime][rpc]init before filter err %s\", err.Error())\n\t\treturn\n\t}\n\tc.beforeInvoke = append(c.beforeInvoke, f.Create())\n}", "func mergeBeforeFuncs(funcs ...cli.BeforeFunc) cli.BeforeFunc {\n\treturn func(c *cli.Context) error {\n\t\tcatcher := grip.NewBasicCatcher()\n\t\tfor _, f := range funcs {\n\t\t\tcatcher.Add(f(c))\n\t\t}\n\t\treturn catcher.Resolve()\n\t}\n}", "func (response *Response) Before(fn func()) {\n\tresponse.beforeFuncs = append(response.beforeFuncs, fn)\n}", "func PublisherBefore(before ...RequestFunc) PublisherOption {\n\treturn func(p *Publisher) { p.before = append(p.before, before...) }\n}", "func ProducerMsgHandlerBefore(before ...BeforeFunc) ProducerMsgOption {\n\treturn func(h *ProducerMsgHandler) { h.Before = append(h.Before, before...) }\n}", "func (s *Server) RegisterBeforeFunc(fn interface{}) error {\n\tif err := validCtxFunc(fn, s.ctxType); err != nil {\n\t\treturn err\n\t}\n\ts.beforeFns = append(s.beforeFns, reflect.ValueOf(fn))\n\treturn nil\n}", "func (x Go) Before(before func()) Go {\n\tx.before = before\n\treturn x\n}", "func (e *Experiment) Before(fnc BeforeFunc) {\n\te.before = fnc\n}", "func BeforeRun(f func()) {\n\tbeforeRun = append(beforeRun, f)\n}", "func RegisterBeforeInvoke(f BeforeFactory) {\n\tbeforeInvokeRegistry[f.Name()] = f\n}", "func (tc *TestCase) SetPreTestFunc(curFunc func(data interface{}, context *TestContext)) {\n\tif tc.PreTestFunc == nil {\n\t\ttc.PreTestFunc = curFunc\n\t}\n}", "func WithFunctionsWorker() testcontainers.CustomizeRequestOption {\n\treturn func(req *testcontainers.GenericContainerRequest) {\n\t\treq.Cmd = []string{\"/bin/bash\", \"-c\", defaultPulsarCmd}\n\n\t\t// add the waiting strategy for the functions worker\n\t\tdefaultWaitStrategies.Strategies = append(\n\t\t\tdefaultWaitStrategies.Strategies,\n\t\t\twait.ForLog(\"Function worker service started\"),\n\t\t)\n\n\t\treq.WaitingFor = defaultWaitStrategies\n\t}\n}", "func WithBefore(f RoundTripperBeforeFunc) RoundTripperOption {\n\treturn func(cfg *roundTripperConfig) {\n\t\tcfg.before = f\n\t}\n}", "func (f *Fastglue) Before(fm ...FastMiddleware) {\n\tf.before = append(f.before, fm...)\n}", "func (f *FSM) beforeEventCallbacks(ctx context.Context, e *Event) error {\n\tif fn, ok := f.callbacks[cKey{e.Event, callbackBeforeEvent}]; ok {\n\t\tfn(ctx, e)\n\t\tif e.canceled {\n\t\t\treturn CanceledError{e.Err}\n\t\t}\n\t}\n\tif fn, ok := f.callbacks[cKey{\"\", callbackBeforeEvent}]; ok {\n\t\tfn(ctx, e)\n\t\tif e.canceled {\n\t\t\treturn CanceledError{e.Err}\n\t\t}\n\t}\n\treturn nil\n}", "func (funcs *NamedFuncs) InsertBefore(before, nm string, f func()) error {\n\ti, err := funcs.FindName(before)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"InsertBefore of %s: %s\", nm, err)\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tfuncs.InsertAt(i, nm, f)\n\treturn nil\n}", "func (c Carapace) PreInvoke(f func(cmd *cobra.Command, flag *pflag.Flag, action Action) Action) {\n\tif entry := storage.get(c.cmd); entry.preinvoke != nil {\n\t\t_f := entry.preinvoke\n\t\tentry.preinvoke = func(cmd *cobra.Command, flag *pflag.Flag, action 
Action) Action {\n\t\t\treturn f(cmd, flag, _f(cmd, flag, action))\n\t\t}\n\t} else {\n\t\tentry.preinvoke = f\n\t}\n}", "func (ctx *TestSuiteContext) BeforeSuite(fn func()) {\n\tctx.beforeSuiteHandlers = append(ctx.beforeSuiteHandlers, fn)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithAfterFuncsConsumerOption provides a way to set AfterFunc(s) on the consumer
func WithAfterFuncsConsumerOption(fns ...AfterFunc) ConsumerOption { return func(c *Consumer) { c.afters = append(c.afters, fns...) } }
[ "func WithBeforeFuncsConsumerOption(fns ...BeforeFunc) ConsumerOption {\n\treturn func(c *Consumer) { c.befores = append(c.befores, fns...) }\n}", "func (response *Response) After(fn func()) {\n\tresponse.afterFuncs = append(response.afterFuncs, fn)\n}", "func After(routines []func(), callback func()) {\n\tvar wg sync.WaitGroup\n\twg.Add(len(routines))\n\tfor _, routine := range routines {\n\t\tgo func(f func()) {\n\t\t\tf()\n\t\t\twg.Done()\n\t\t}(routine)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tcallback()\n\t}()\n}", "func ProducerMsgHandlerAfter(after ...AfterFunc) ProducerMsgOption {\n\treturn func(h *ProducerMsgHandler) { h.After = append(h.After, after...) }\n}", "func (c *callback) AddAfterInvoke(conf rpc.CallbackFunc) {\n\tf, ok := afterInvokeRegistry[conf.Name]\n\tif !ok {\n\t\tlog.DefaultLogger.Errorf(\"[runtime][rpc]can't find after filter %s\", conf.Name)\n\t\treturn\n\t}\n\tif err := f.Init(conf.Config); err != nil {\n\t\tlog.DefaultLogger.Errorf(\"[runtime][rpc]init after filter err %s\", err.Error())\n\t\treturn\n\t}\n\tc.afterInvoke = append(c.afterInvoke, f.Create())\n}", "func AfterFunc(ctx *cli.Context) error {\n\treturn helper.WriteData(Data)\n}", "func WithAfter(f RoundTripperAfterFunc) RoundTripperOption {\n\treturn func(cfg *roundTripperConfig) {\n\t\tcfg.after = f\n\t}\n}", "func WithAfterTests(funcs ...func()) TestOption {\n\treturn afterTests(funcs)\n}", "func (r *Router) UseAfterFunc(f func(Context) error) {\n\tr.UseAfter(HandlerFunc(f))\n}", "func (f *Fastglue) After(fm ...FastMiddleware) {\n\tf.after = append(f.after, fm...)\n}", "func RegisterAfterShutdown(f func()) Option {\n\treturn optionFunc(func(c *config) {\n\t\tc.afterShutdown = append(c.afterShutdown, f)\n\t})\n}", "func (x Go) After(after func(), deferred ...bool) Go {\n\tx.after = after\n\tif len(deferred) > 0 {\n\t\tx.deferAfter = deferred[0]\n\t}\n\treturn x\n}", "func After(afters ...Dependency) TaskOption {\n\tvar deps []*taskDefinition\n\tfor _, a := range afters {\n\t\tdeps = append(deps, a.dependencies()...)\n\t}\n\treturn &after{deps}\n}", "func (r *Router) UseAfter(fns ...Middleware) *Router {\n\tr.middlewares.After = append(r.middlewares.After, fns...)\n\n\treturn r\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (fc *fakeClock) AfterFunc(d time.Duration, f func()) Timer {\n\tgoFunc := func(fn interface{}, _ time.Time) {\n\t\tgo fn.(func())()\n\t}\n\n\ts := &fakeTimer{\n\t\tclock: fc,\n\t\tuntil: fc.Now().Add(d),\n\t\tcallback: goFunc,\n\t\targ: f,\n\t\t// zero-valued c, the same as it is in the `time` pkg\n\t}\n\tfc.addTimer(s)\n\treturn s\n}", "func (cs *Callbacks) AddAfterServedCallBack(f func(context *Context)) {\n\tcs.Lock()\n\tcs.afterServed = append(cs.afterServed, f)\n\tcs.Unlock()\n}", "func (t *Time) AfterFunc(distance Distance, callback func()) Watcher {\n\treturn NewTimeWatcher(time.AfterFunc(time.Duration(distance), callback))\n}", "func (e *EventRoll) After(f ...Callabut) {\n\tif e.fired {\n\t\tfor _, v := range f {\n\t\t\te.Handlers.Add(v, nil)\n\t\t\tv(e.cache)\n\t\t}\n\t\treturn\n\t}\n\n\tfor _, v := range f {\n\t\te.Handlers.Add(v, nil)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithEndpointConsumerOption provides a way to set the endpoint on the consumer
func WithEndpointConsumerOption(end endpoint.Endpoint) ConsumerOption { return func(c *Consumer) { c.end = end } }
[ "func WithEndpoint(endpoint string) Option {\n\treturn func(cfg *config) {\n\t\tcfg.collectorEndpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) Opt {\n\treturn func(c *Client) {\n\t\tc.endpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) Option {\n\treturn func(a *Client) error {\n\t\ta.endpoint = endpoint\n\t\treturn nil\n\t}\n}", "func WithEndpoint(endpoint string) ClientOption {\n\treturn func(o *requestFactory) {\n\t\to.endpoint = endpoint\n\t}\n}", "func WithEndpoint(endpoint string) CollectorEndpointOption {\n\treturn collectorEndpointOptionFunc(func(o *collectorEndpointConfig) {\n\t\to.endpoint = endpoint\n\t})\n}", "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func Endpoint(e string) Option {\n\treturn func(o *Options) {\n\t\to.Endpoint = e\n\t}\n}", "func SetEndpoint(address string) OptionsFunc {\n\treturn func(s *Server) error {\n\t\ts.endpoint = address\n\t\treturn nil\n\t}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func (c *Client) SetEndpoint(endpoint string) {\n\tif endpoint == \"\" {\n\t\tc.endpoint = DefaultEndpoint\n\t\treturn\n\t}\n\tc.endpoint = endpoint\n}", "func WithEndpoint(ctx context.Context, endpoint ReqEndpoint) context.Context {\n\treturn context.WithValue(ctx, ReqEndpoint{}, endpoint)\n}", "func (clt *Client) SetEndpoint(endpoint string) {\n\tclt.endpoint = endpoint\n}", "func NewEndpoint(r io.Reader, w io.Writer, c io.Closer, options ...Option) (*Endpoint, error) {\n\tbw := bufio.NewWriter(w)\n\te := &Endpoint{\n\t\tdone: make(chan struct{}),\n\t\thandlers: make(map[string]*handler),\n\t\tpending: make(map[uint64]*Call),\n\t\tcloser: c,\n\t\tbw: bw,\n\t\tenc: msgpack.NewEncoder(bw),\n\t\tdec: msgpack.NewDecoder(r),\n\t}\n\tfor _, option := range options {\n\t\toption.f(e)\n\t}\n\treturn e, nil\n\n}", "func WithAPIEndpoint(endpoint string) Option {\n\treturn func(o *Options) {\n\t\to.APIEndpoint = endpoint\n\t}\n}", "func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption {\n\treturn endpointOptionFunc(func() (batchUploader, error) {\n\t\tcfg := &collectorEndpointConfig{\n\t\t\tendpoint: envOr(envEndpoint, \"http://localhost:14268/api/traces\"),\n\t\t\tusername: envOr(envUser, \"\"),\n\t\t\tpassword: envOr(envPassword, \"\"),\n\t\t\thttpClient: http.DefaultClient,\n\t\t}\n\n\t\tfor _, opt := range options {\n\t\t\topt.apply(cfg)\n\t\t}\n\n\t\treturn &collectorUploader{\n\t\t\tendpoint: cfg.endpoint,\n\t\t\tusername: cfg.username,\n\t\t\tpassword: cfg.password,\n\t\t\thttpClient: cfg.httpClient,\n\t\t}, nil\n\t})\n}", "func (socketlabsClient *socketlabsClient) SetEndpointURL(endpointURL string) {\n\tsocketlabsClient.EndpointURL = endpointURL\n}", "func WithMetricsEndpoint(endpoint string) Option {\n\treturn wrappedOption{otlpconfig.WithMetricsEndpoint(endpoint)}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithReaderConsumerOption lets you set the reader for Kafka
func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption { return func(c *Consumer) { c.reader = reader } }
[ "func ConsumerReader(r StreamReader) ConsumerOptionsFn {\n\treturn func(o *Consumer) error {\n\t\to.reader = r\n\t\treturn nil\n\t}\n}", "func WithReader(r reader.Reader) Option {\n\treturn func(c *client) error {\n\t\tif r != nil {\n\t\t\tc.reader = r\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithReader(r reader.Reader) Option {\n\treturn func(o *Options) {\n\t\to.Reader = r\n\t}\n}", "func OptionReader(r io.Reader) Option {\n\treturn func(rw *ReadWriter) {\n\t\trw.fnRead = r.Read\n\t}\n}", "func WithReader(a adaptor.Adaptor) OptionFunc {\n\treturn func(n *Node) error {\n\t\tr, err := a.Reader()\n\t\tn.reader = r\n\t\treturn err\n\t}\n}", "func WithReader(r reader.Reader) loader.Option {\n\treturn func(o *loader.Options) {\n\t\to.Reader = r\n\t}\n}", "func WithReaderBackoff(enabled bool) Option {\n\treturn func(c *client) error {\n\t\tc.readerBackoffEnabled = enabled\n\t\treturn nil\n\t}\n}", "func NewReader(topicName string, groupID string) *kafka.Reader {\r\n\tbrokerUrls := Config.GetStringSlice(\"kafka.consumer.brokerUrls\")\r\n\tr := kafka.NewReader(kafka.ReaderConfig{\r\n\t\tBrokers: brokerUrls,\r\n\t\tGroupID: groupID,\r\n\t\tTopic: topicName,\r\n\t\tDialer: dialer,\r\n\t})\r\n\treturn r\r\n}", "func WithConsumer(consumer string) ConsumerOption {\n\treturn ConsumerOption(consumer)\n}", "func WithOffsetConsumerOption(offset int64) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tswitch offset {\n\t\tcase LastOffset:\n\t\t\tc.config.StartOffset = LastOffset\n\t\tcase FirstOffset:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\tdefault:\n\t\t\tc.config.StartOffset = FirstOffset\n\t\t}\n\t}\n}", "func WithDecoderConsumerOption(fn Decoder) ConsumerOption {\n\treturn func(c *Consumer) { c.dec = fn }\n}", "func InjectReader(reader Reader) Option {\n\treturn func(c *configClient) {\n\t\tc.reader = reader\n\t}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func Reader(topic string) (reader *kafka.Reader, ok bool) {\n\treader, ok = readers[topic]\n\treturn\n}", "func (readerBuilder) WithFileReader(reader FileReader) ReaderOption {\n\treturn func(r *Reader) {\n\t\tr.fileReader = reader\n\t}\n}", "func WithTLSReader(certReader, keyReader, caReader io.Reader, serverNameOverride string, skipVerify bool) Option {\n\treturn func(c *AsyncClient) error {\n\t\tb, err := ioutil.ReadAll(certReader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcp := x509.NewCertPool()\n\t\tif !cp.AppendCertsFromPEM(b) {\n\t\t\treturn err\n\t\t}\n\n\t\t// load cert-key pair\n\t\tcertBytes, err := ioutil.ReadAll(certReader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyBytes, err := ioutil.ReadAll(keyReader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcert, err := tls.X509KeyPair(certBytes, keyBytes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tc.options.tlsConfig = &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\tInsecureSkipVerify: skipVerify,\n\t\t\tClientCAs: cp,\n\t\t\tServerName: serverNameOverride,\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (p *MockPartition) SetReader(r io.ReadCloser) *MockPartition {\n\tp.reader = r\n\treturn p\n}", "func NewConsumer(\n\tbrokers []string,\n\tlogger log.Logger,\n\toptions ...ConsumerOption,\n) (*Consumer, error) {\n\t// default values\n\tcfg := kafgo.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: defaultConsumerGroupID,\n\t\tTopic: defaultTopic,\n\t\tLogger: kafka.LoggerFunc(logger.Debugf),\n\t}\n\n\tcs := &Consumer{\n\t\treader: 
nil,\n\t\tconfig: &cfg,\n\t}\n\n\tfor _, o := range options {\n\t\to(cs)\n\t}\n\n\tif cs.end == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing endpoint\",\n\t\t)\n\t}\n\n\tif cs.dec == nil {\n\t\treturn nil, errors.Wrap(\n\t\t\tErrCreatingConsumer, \"missing decoder\",\n\t\t)\n\t}\n\n\tif cs.errFn == nil {\n\t\tcs.errFn = defaultErrorFunc\n\t}\n\n\tif cs.errHandler == nil {\n\t\tcs.errHandler = transport.NewLogErrorHandler(logger)\n\t}\n\treturn cs, nil\n}", "func InitConsumer(conn io.Reader, out io.Writer) *Consumer {\n\tdec := json.NewDecoder(conn)\n\tcons := new(Consumer)\n\tcons.Dec = keyEncoding.NewJsonKeyDecoder(dec)\n\tcons.Out = out\n\treturn cons\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithOffsetConsumerOption lets you set the Kafka offset to read from
func WithOffsetConsumerOption(offset int64) ConsumerOption { return func(c *Consumer) { switch offset { case LastOffset: c.config.StartOffset = LastOffset case FirstOffset: c.config.StartOffset = FirstOffset default: c.config.StartOffset = FirstOffset } } }
[ "func WithOffset(offset int) eventsource.QueryOption {\n\treturn func(i interface{}) {\n\t\tif o, ok := i.(*options); ok {\n\t\t\to.offset = &offset\n\t\t}\n\t}\n}", "func ReadOffset(o uint) ReadOption {\n\treturn func(r *ReadOptions) {\n\t\tr.Offset = o\n\t}\n}", "func WithReaderConsumerOption(reader *kafgo.Reader) ConsumerOption {\n\treturn func(c *Consumer) { c.reader = reader }\n}", "func (c *KafkaConsumer) SetOffset(topic string, partition int32, offset int64) error {\n\tc.partitionConsumersLock.Lock()\n\tdefer c.partitionConsumersLock.Unlock()\n\n\tif !c.exists(topic, partition) {\n\t\tlog.Info(\"Can't set offset as partition consumer for topic %s, partition %d does not exist\", topic, partition)\n\t\treturn ErrPartitionConsumerDoesNotExist\n\t}\n\n\tc.partitionConsumers[topic][partition].SetOffset(offset)\n\treturn nil\n}", "func WithOffset(offset int64) AppendOption {\n\treturn func(pw *pendingWrite) {\n\t\tpw.request.Offset = &wrapperspb.Int64Value{\n\t\t\tValue: offset,\n\t\t}\n\t}\n}", "func WithTopicConsumerOption(topic string) ConsumerOption {\n\treturn func(c *Consumer) {\n\t\tc.config.Topic = topic\n\t}\n}", "func NewResetOffsetConsumerGroupCommand(f *factory.Factory) *cobra.Command {\n\topts := &options{\n\t\tConnection: f.Connection,\n\t\tConfig: f.Config,\n\t\tIO: f.IOStreams,\n\t\tLogger: f.Logger,\n\t\tlocalizer: f.Localizer,\n\t\tContext: f.Context,\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"reset-offset\",\n\t\tShort: opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.cmd.shortDescription\"),\n\t\tLong: opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.cmd.longDescription\"),\n\t\tExample: opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.cmd.example\"),\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) (err error) {\n\t\t\tvalidator = consumergroup.Validator{\n\t\t\t\tLocalizer: opts.localizer,\n\t\t\t}\n\n\t\t\tif opts.offset != \"\" {\n\t\t\t\tif err = validator.ValidateOffset(opts.offset); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif opts.value == \"\" && (opts.offset == consumergroup.OffsetAbsolute || opts.offset == consumergroup.OffsetTimestamp) {\n\t\t\t\treturn opts.localizer.MustLocalizeError(\"kafka.consumerGroup.resetOffset.error.valueRequired\", localize.NewEntry(\"Offset\", opts.offset))\n\t\t\t}\n\n\t\t\tif opts.kafkaID != \"\" {\n\t\t\t\treturn runCmd(opts)\n\t\t\t}\n\n\t\t\tcfg, err := opts.Config.Load()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !cfg.HasKafka() {\n\t\t\t\treturn opts.localizer.MustLocalizeError(\"kafka.consumerGroup.common.error.noKafkaSelected\")\n\t\t\t}\n\n\t\t\topts.kafkaID = cfg.Services.Kafka.ClusterID\n\n\t\t\treturn runCmd(opts)\n\t\t},\n\t}\n\n\tcmd.Flags().BoolVarP(&opts.skipConfirm, \"yes\", \"y\", false, opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.flag.yes\"))\n\tcmd.Flags().StringVar(&opts.id, \"id\", \"\", opts.localizer.MustLocalize(\"kafka.consumerGroup.common.flag.id.description\", localize.NewEntry(\"Action\", \"reset-offset\")))\n\tcmd.Flags().StringVar(&opts.value, \"value\", \"\", opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.flag.value\"))\n\tcmd.Flags().StringVar(&opts.offset, \"offset\", \"\", opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.flag.offset\"))\n\tcmd.Flags().StringVar(&opts.topic, \"topic\", \"\", opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.flag.topic\"))\n\tcmd.Flags().Int32SliceVar(&opts.partitions, \"partitions\", []int32{}, 
opts.localizer.MustLocalize(\"kafka.consumerGroup.resetOffset.flag.partitions\"))\n\n\t_ = cmd.MarkFlagRequired(\"id\")\n\t_ = cmd.MarkFlagRequired(\"offset\")\n\t_ = cmd.MarkFlagRequired(\"topic\")\n\n\t// flag based completions for ID\n\t_ = cmd.RegisterFlagCompletionFunc(\"id\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn cmdutil.FilterValidConsumerGroupIDs(f, toComplete)\n\t})\n\n\t// flag based completions for topic\n\t_ = cmd.RegisterFlagCompletionFunc(\"topic\", func(cmd *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn cmdutil.FilterValidTopicNameArgs(f, toComplete)\n\t})\n\n\tflagutil.EnableOutputFlagCompletion(cmd)\n\tflagutil.EnableStaticFlagCompletion(cmd, \"offset\", consumergroup.ValidOffsets)\n\n\treturn cmd\n}", "func (c *offsetCoordinator) Offset(\n\ttopic string, partition int32) (\n\toffset int64, metadata string, resErr error) {\n\n\tretry := &backoff.Backoff{Min: c.conf.RetryErrWait, Jitter: true}\n\tfor try := 0; try < c.conf.RetryErrLimit; try++ {\n\t\tif try != 0 {\n\t\t\ttime.Sleep(retry.Duration())\n\t\t}\n\n\t\t// get a copy of our connection with the lock, this might establish a new\n\t\t// connection so can take a bit\n\t\tconn, err := c.broker.coordinatorConnection(c.conf.ConsumerGroup)\n\t\tif conn == nil {\n\t\t\tresErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer func(lconn *connection) { go c.broker.conns.Idle(lconn) }(conn)\n\n\t\tresp, err := conn.OffsetFetch(&proto.OffsetFetchReq{\n\t\t\tConsumerGroup: c.conf.ConsumerGroup,\n\t\t\tTopics: []proto.OffsetFetchReqTopic{\n\t\t\t\t{\n\t\t\t\t\tName: topic,\n\t\t\t\t\tPartitions: []int32{partition},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tresErr = err\n\n\t\tswitch err {\n\t\tcase io.EOF, syscall.EPIPE:\n\t\t\tlog.Debugf(\"connection died while fetching offsets on %s:%d for %s: %s\",\n\t\t\t\ttopic, partition, c.conf.ConsumerGroup, err)\n\t\t\t_ = conn.Close()\n\n\t\tcase nil:\n\t\t\tfor _, t := range resp.Topics {\n\t\t\t\tfor _, p := range t.Partitions {\n\t\t\t\t\tif t.Name != topic || p.ID != partition {\n\t\t\t\t\t\tlog.Warningf(\"offset response with unexpected data for %s:%d\",\n\t\t\t\t\t\t\tt.Name, p.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif p.Err != nil {\n\t\t\t\t\t\treturn 0, \"\", p.Err\n\t\t\t\t\t}\n\t\t\t\t\t// This is expected in and only in the case where the consumer group, topic\n\t\t\t\t\t// pair is brand new. However, it appears there may be race conditions\n\t\t\t\t\t// where Kafka returns -1 erroneously. 
Not sure how to handle this yet,\n\t\t\t\t\t// but adding debugging in the meantime.\n\t\t\t\t\tif p.Offset < 0 {\n\t\t\t\t\t\tlog.Errorf(\"negative offset response %d for %s:%d\",\n\t\t\t\t\t\t\tp.Offset, t.Name, p.ID)\n\t\t\t\t\t}\n\t\t\t\t\treturn p.Offset, p.Metadata, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn 0, \"\", errors.New(\"response does not contain offset information\")\n\t\t}\n\t}\n\n\treturn 0, \"\", resErr\n}", "func (kc *KClient) ConsumeOffsetMsg(topic string, partition int32, offset int64) (message *Message, err error) {\n\tconsumer, err := sarama.NewConsumerFromClient(kc.cl)\n\tif err != nil {\n\t\treturn\n\t}\n\tpartitionConsumer, err := consumer.ConsumePartition(topic, partition, offset)\n\tif err != nil {\n\t\treturn\n\t}\n\tmsg := <-partitionConsumer.Messages()\n\tmessage = convertMsg(msg)\n\terr = partitionConsumer.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\terr = consumer.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func AutoOffsetNone() ConsumerOption {\n\treturn func(o *ConsumerOptions) error {\n\t\to.AutoOffset = autoOffsetNone\n\t\treturn nil\n\t}\n}", "func (c *Consumer) Offset(partitionID int32) (int64, error) {\n\treturn c.zoo.Offset(c.group, c.topic, partitionID)\n}", "func (cc *ConfluentConsumer) Seek(offset int64) error {\n\ttp := kafkalib.TopicPartition{Topic: &cc.conf.Topic, Offset: kafkalib.Offset(offset)}\n\tif cc.conf.Consumer.Partition != nil {\n\t\ttp.Partition = *cc.conf.Consumer.Partition\n\t}\n\n\terr := cc.c.Seek(tp, int(cc.conf.RequestTimeout.Milliseconds()))\n\tif err, ok := err.(kafkalib.Error); ok && err.Code() == kafkalib.ErrTimedOut {\n\t\treturn ErrSeekTimedOut\n\t}\n\n\treturn nil\n}", "func (c *KafkaConsumer) Offset(topic string, partition int32) (int64, error) {\n\tc.partitionConsumersLock.Lock()\n\tdefer c.partitionConsumersLock.Unlock()\n\n\tif !c.exists(topic, partition) {\n\t\tlog.Info(\"Can't get offset as partition consumer for topic %s, partition %d does not exist\", topic, partition)\n\t\treturn -1, ErrPartitionConsumerDoesNotExist\n\t}\n\n\treturn c.partitionConsumers[topic][partition].Offset(), nil\n}", "func WithCheckpoint(checkpoint Checkpoint) Option {\n\treturn func(c *Consumer) error {\n\t\tc.checkpoint = checkpoint\n\t\treturn nil\n\t}\n}", "func NewConsumer(log logrus.FieldLogger, conf Config, opts ...ConfigOpt) (Consumer, error) {\n\t// See Reference at https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n\tkafkaConf := conf.baseKafkaConfig()\n\t_ = kafkaConf.SetKey(\"enable.auto.offset.store\", false) // manually StoreOffset after processing a message. 
Otherwise races may happen.)\n\n\t// In case we try to assign an offset out of range (greater than log-end-offset), consumer will use start consuming from offset zero.\n\t_ = kafkaConf.SetKey(\"auto.offset.reset\", \"earliest\")\n\n\tconf.Consumer.Apply(kafkaConf)\n\tfor _, opt := range opts {\n\t\topt(kafkaConf)\n\t}\n\n\tif err := conf.configureAuth(kafkaConf); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error configuring auth for the Kafka consumer\")\n\t}\n\n\tconsumer, err := kafkalib.NewConsumer(kafkaConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif conf.RequestTimeout == 0 {\n\t\tconf.RequestTimeout = DefaultTimeout\n\t}\n\n\tcc := &ConfluentConsumer{\n\t\tc: consumer,\n\t\tconf: conf,\n\t\tlog: log,\n\t}\n\n\tlogFields := logrus.Fields{\"kafka_topic\": cc.conf.Topic}\n\n\tif cc.conf.Consumer.Partition != nil || cc.conf.Consumer.PartitionKey != \"\" {\n\t\t// set the default partitioner algorithm\n\t\tif cc.conf.Consumer.PartitionerAlgorithm == \"\" {\n\t\t\tcc.conf.Consumer.PartitionerAlgorithm = PartitionerMurMur2\n\t\t}\n\t\t// Set the partition if a key is set to determine the partition\n\t\tif cc.conf.Consumer.PartitionKey != \"\" && cc.conf.Consumer.PartitionerAlgorithm != \"\" {\n\t\t\tcc.AssignPartitionByKey(cc.conf.Consumer.PartitionKey, cc.conf.Consumer.PartitionerAlgorithm)\n\t\t}\n\n\t\tlogFields[\"kafka_partition_key\"] = cc.conf.Consumer.PartitionKey\n\t\tlogFields[\"kafka_partition\"] = *cc.conf.Consumer.Partition\n\t}\n\n\tcc.setupRebalanceHandler()\n\tcc.log.WithFields(logFields).Debug(\"Subscribing to Kafka topic\")\n\tif serr := cc.c.Subscribe(cc.conf.Topic, cc.rebalanceHandler); serr != nil {\n\t\terr = errors.Wrap(serr, \"error subscribing to topic\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cc, nil\n}", "func WithAutoCommitConsumerOption(flag bool) ConsumerOption {\n\treturn func(c *Consumer) { c.autocommit = flag }\n}", "func (o *GetSearchClinicsParams) SetOffset(offset *int64) {\n\to.Offset = offset\n}", "func (c *Client) SetOffset(partition int32, offset int64) (err error) {\n\tom, err := sarama.NewOffsetManagerFromClient(c.group, c.Client)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer om.Close()\n\tpm, err := om.ManagePartition(c.topic, partition)\n\tif err != nil {\n\t\treturn\n\t}\n\tpm.MarkOffset(offset, \"\")\n\tif err = pm.Close(); err != nil {\n\t\treturn\n\t}\n\ttime.Sleep(10 * commitInterval)\n\t// verify\n\tmarked, err := c.OffsetMarked()\n\tlog.Info(\"partititon:%d, before:%d, after:%d\\n\", partition, offset, marked[partition])\n\treturn\n}", "func (msg *Message) MarkOffset() {\n if msg.consumerMsg != nil && msg.client != nil && msg.client.consumer != nil {\n msg.client.consumer.MarkOffset(msg.consumerMsg, \"\")\n }\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewConsumer returns a Kafka consumer for the given brokers
func NewConsumer( brokers []string, logger log.Logger, options ...ConsumerOption, ) (*Consumer, error) { // default values cfg := kafgo.ReaderConfig{ Brokers: brokers, GroupID: defaultConsumerGroupID, Topic: defaultTopic, Logger: kafka.LoggerFunc(logger.Debugf), } cs := &Consumer{ reader: nil, config: &cfg, } for _, o := range options { o(cs) } if cs.end == nil { return nil, errors.Wrap( ErrCreatingConsumer, "missing endpoint", ) } if cs.dec == nil { return nil, errors.Wrap( ErrCreatingConsumer, "missing decoder", ) } if cs.errFn == nil { cs.errFn = defaultErrorFunc } if cs.errHandler == nil { cs.errHandler = transport.NewLogErrorHandler(logger) } return cs, nil }
[ "func NewConsumer(topic, groupID string, brokers []string) *Consumer {\n\tnewConsumer := kafka.NewReader(kafka.ReaderConfig{\n\t\tBrokers: brokers,\n\t\tGroupID: groupID,\n\t\tTopic: topic,\n\t\tQueueCapacity: 1,\n\t\tStartOffset: kafka.FirstOffset,\n\t})\n\n\treturn &Consumer{\n\t\tconsumer: newConsumer,\n\t}\n}", "func newKafkaConsumer() sarama.Consumer {\n\n\tkafkaBroker := os.Getenv(\"KAFKA_BROKER\")\n\n\tif runtime.GOOS == \"darwin\" {\n\t\tbrokers = []string{\"localhost:9092\"}\n\t} else {\n\t\tif kafkaBroker == \"\" {\n\t\t\tfmt.Printf(\"$KAFKA_BROKER must be set\")\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tbrokers = []string{kafkaBroker}\n\t}\n\n\tconsumer, err := sarama.NewConsumer(brokers, newKafkaConfiguration())\n\n\tfmt.Print(\"Creating new Kafka Consumer \\n\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"Kafka error: %s\\n\", err)\n\t\tos.Exit(-1)\n\t}\n\n\treturn consumer\n}", "func New_kafka_consumer(brokers []string) (sarama.Consumer, error) {\n\tif len(brokers) == 0 {\n\t\terr := errors.New(\"Invalid broker information provided\")\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\tlog.Infof(\"RequestProcessor: new_kafka_consumer: start\")\n\tconfig := sarama.NewConfig()\n\tlog.Infof(\"Consumer Config: %v\\n\", config)\n\tconsumer, err := sarama.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to start Sarama consumer: %s\", err)\n\t\treturn nil, err\n\t}\n\tlog.Infof(\"RequestProcessor: new_kafka_consumer: end\")\n\treturn consumer, nil\n}", "func getConsumer(brokers []string, topic string) kafka.PartitionConsumer {\n\tconfig := kafka.NewConfig()\n\tconfig.Net.KeepAlive = 10 * time.Second\n\n\tcommon.Logger.Debugf(\"Connect to Kafka brokers %v\", brokers)\n\tmaster, err := kafka.NewConsumer(brokers, config)\n\tif err != nil {\n\t\tcommon.ErrExit(fmt.Sprintf(\"Fail to make connection to brokers %v: %s\", brokers, err.Error()))\n\t}\n\n\ttopics, err := master.Topics()\n\tif err != nil {\n\t\tcommon.ErrExit(fmt.Sprintf(\"Fail to list topics: %s\", err.Error()))\n\t}\n\tcommon.Logger.Debugf(\"Find Kafka topics: %v\", topics)\n\n\texisted := false\n\tfor _, t := range topics {\n\t\tif t == topic {\n\t\t\texisted = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !existed {\n\t\tcommon.ErrExit(fmt.Sprintf(\"Fail to find topic %s\", topic))\n\t}\n\n\tcommon.Logger.Debugf(\"Create consumer for topic %s with parition %d\", topic, partition)\n\tconsumer, err := master.ConsumePartition(topic, partition, kafka.OffsetNewest)\n\tif err != nil {\n\t\tcommon.ErrExit(fmt.Sprintf(\"Fail to create consumer for topic %s with partition %d: %s\", common.RuntimeCfg.Topic, partition, err.Error()))\n\t}\n\n\treturn consumer\n}", "func NewConsumer(topics []string, valueFactory ValueFactory, opts ...ConsumerOption) (*Consumer, error) {\n\tc := &Consumer{\n\t\tvalueFactory: valueFactory,\n\t\tavroAPI: avro.DefaultConfig,\n\t\tensureTopics: true,\n\t}\n\t// Loop through each option\n\tfor _, opt := range opts {\n\t\t// apply option\n\t\topt.applyC(c)\n\t}\n\n\tvar err error\n\n\t// if consumer not provided - make one\n\tif c.KafkaConsumer == nil {\n\t\t// if kafka config not provided - build default one\n\t\tif c.kafkaCfg == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tBroker string `env:\"KAFKA_BROKER\" envDefault:\"localhost:9092\"`\n\t\t\t\tCAFile string `env:\"KAFKA_CA_FILE\"`\n\t\t\t\tKeyFile string `env:\"KAFKA_KEY_FILE\"`\n\t\t\t\tCertificateFile string `env:\"KAFKA_CERTIFICATE_FILE\"`\n\t\t\t\tGroupID string `env:\"KAFKA_GROUP_ID\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil 
{\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// default configuration\n\t\t\tc.kafkaCfg = &kafka.ConfigMap{\n\t\t\t\t\"bootstrap.servers\": envCfg.Broker,\n\t\t\t\t\"socket.keepalive.enable\": true,\n\t\t\t\t\"enable.auto.commit\": false,\n\t\t\t\t\"enable.partition.eof\": true,\n\t\t\t\t\"session.timeout.ms\": 6000,\n\t\t\t\t\"auto.offset.reset\": \"earliest\",\n\t\t\t\t\"group.id\": envCfg.GroupID,\n\t\t\t}\n\n\t\t\tif envCfg.CAFile != \"\" {\n\t\t\t\t// configure SSL\n\t\t\t\tc.kafkaCfg.SetKey(\"security.protocol\", \"ssl\")\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.ca.location\", envCfg.CAFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.key.location\", envCfg.KeyFile)\n\t\t\t\tc.kafkaCfg.SetKey(\"ssl.certificate.location\", envCfg.CertificateFile)\n\t\t\t}\n\t\t}\n\n\t\tif c.KafkaConsumer, err = kafka.NewConsumer(c.kafkaCfg); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize kafka consumer\")\n\t\t}\n\t}\n\n\tif c.srClient == nil {\n\t\tif c.srURL == nil {\n\t\t\tvar envCfg struct {\n\t\t\t\tSchemaRegistry *url.URL `env:\"KAFKA_SCHEMA_REGISTRY\" envDefault:\"http://localhost:8081\"`\n\t\t\t}\n\t\t\tif err := env.Parse(&envCfg); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.srURL = envCfg.SchemaRegistry\n\t\t}\n\n\t\tif c.srClient, err = NewCachedSchemaRegistryClient(c.srURL.String()); err != nil {\n\t\t\treturn nil, errors.WithMessage(err, \"cannot initialize schema registry client\")\n\t\t}\n\t}\n\n\tif c.eventHandler == nil {\n\t\tc.eventHandler = func(event kafka.Event) {\n\t\t\tlog.Println(event)\n\t\t}\n\t}\n\n\tif topics != nil {\n\t\tif err := c.KafkaConsumer.SubscribeTopics(topics, nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.ensureTopics {\n\t\t\tif err = c.EnsureTopics(topics); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c, nil\n}", "func NewConsumerClient(brokers, group, topics string, oldest, verbose bool) *consumerClient {\n\tc := &consumerClient{\n\t\tbrokers: brokers,\n\t\tgroup: group,\n\t\ttopics: topics,\n\t\toldest: oldest,\n\t\tverbose: verbose,\n\t\tversion: \"0.10.2.0\", //连云端ckafka版本必须是这个,没事别乱改\n\t}\n\treturn c\n}", "func NewConsumer(ctx context.Context) (*Consumer, error) {\n\t// TODO support filter in downstream sink\n\ttz, err := util.GetTimezone(timezone)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"can not load timezone\")\n\t}\n\tctx = util.PutTimezoneInCtx(ctx, tz)\n\tfilter, err := cdcfilter.NewFilter(config.GetDefaultReplicaConfig())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tc := new(Consumer)\n\tc.fakeTableIDGenerator = &fakeTableIDGenerator{\n\t\ttableIDs: make(map[string]int64),\n\t}\n\tc.sinks = make([]*struct {\n\t\tsink.Sink\n\t\tresolvedTs uint64\n\t}, kafkaPartitionNum)\n\tctx, cancel := context.WithCancel(ctx)\n\terrCh := make(chan error, 1)\n\topts := map[string]string{}\n\tfor i := 0; i < int(kafkaPartitionNum); i++ {\n\t\ts, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\t\tif err != nil {\n\t\t\tcancel()\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tc.sinks[i] = &struct {\n\t\t\tsink.Sink\n\t\t\tresolvedTs uint64\n\t\t}{Sink: s}\n\t}\n\tsink, err := sink.NewSink(ctx, \"kafka-consumer\", downstreamURIStr, filter, config.GetDefaultReplicaConfig(), opts, errCh)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, errors.Trace(err)\n\t}\n\tgo func() {\n\t\terr := <-errCh\n\t\tif errors.Cause(err) != context.Canceled {\n\t\t\tlog.Error(\"error on running consumer\", 
zap.Error(err))\n\t\t} else {\n\t\t\tlog.Info(\"consumer exited\")\n\t\t}\n\t\tcancel()\n\t}()\n\tc.ddlSink = sink\n\tc.ready = make(chan bool)\n\treturn c, nil\n}", "func (m *ManagedConsumer) newConsumer(ctx context.Context) (*Consumer, error) {\n\tmc, err := m.clientPool.ForTopic(ctx, m.cfg.ManagedClientConfig, m.cfg.Topic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient, err := mc.Get(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the topic consumer. A non-blank consumer name is required.\n\tif m.cfg.Exclusive {\n\t\treturn client.NewExclusiveConsumer(ctx, m.cfg.Topic, m.cfg.Name, m.cfg.Earliest, m.queue)\n\t}\n\treturn client.NewSharedConsumer(ctx, m.cfg.Topic, m.cfg.Name, m.queue)\n}", "func newConsumer(s cmdSender, dispatcher *frameDispatcher, topic string, reqID *monotonicID, consumerID uint64, queue chan Message) *Consumer {\n\treturn &Consumer{\n\t\ts: s,\n\t\ttopic: topic,\n\t\tconsumerID: consumerID,\n\t\treqID: reqID,\n\t\tdispatcher: dispatcher,\n\t\tqueue: queue,\n\t\tclosedc: make(chan struct{}),\n\t\tendOfTopicc: make(chan struct{}),\n\t}\n}", "func NewConsumer(addr string, consumer gonzo.Consumer) *Consumer {\n\treturn &Consumer{\n\t\taddress: addr,\n\t\tconsumer: consumer,\n\t\tcustomHandlers: make(map[string]func([]byte) (*Response, error)),\n\t\tclose: make(chan struct{}),\n\t\tclosed: make(chan struct{}),\n\t}\n}", "func NewConsumer(log *zap.Logger, tlsCfg *TLSConfig, lookupds ...string) (*Consumer, error) {\n\tcfg := CreateNSQConfig(tlsCfg)\n\tcfg.LookupdPollInterval = time.Second * 5\n\tcfg.HeartbeatInterval = time.Second * 5\n\tcfg.DefaultRequeueDelay = time.Second * 5\n\tcfg.MaxInFlight = 10\n\n\treturn &Consumer{\n\t\tconfig: cfg,\n\t\tlookupds: lookupds,\n\t\tlog: log,\n\t\tlogLevel: nsq.LogLevelInfo,\n\t}, nil\n}", "func NewConsumer(addrs []string, config *sarama.Config, sensor instana.TracerLogger) (sarama.Consumer, error) {\n\tc, err := sarama.NewConsumer(addrs, config)\n\tif err != nil {\n\t\treturn c, err\n\t}\n\n\treturn WrapConsumer(c, sensor), nil\n}", "func NewConsumer(coord Coordinator, h HandlerFunc, b Balancer) (*Consumer, error) {\n\tc := &Consumer{\n\t\trunning: make(map[string]*runtask),\n\t\thandler: h,\n\t\tbal: b,\n\t\tbalEvery: BalanceEvery,\n\t\tcoord: coord,\n\t\tstop: make(chan struct{}),\n\t\ttasks: make(chan Task),\n\t}\n\tc.im = ignorer(c.tasks, c.stop)\n\n\t// initialize balancer with the consumer and a prefixed logger\n\tb.Init(c)\n\n\tif err := coord.Init(&coordinatorContext{c}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func NewConsumer(\n\tlogger *zap.Logger,\n\tserviceName string,\n\tamqpDSN string,\n\tconcurrentProcessingLimit int,\n\thandler func(*Event) error,\n) (*Consumer, error) {\n\tprocessor := &Consumer{\n\t\tlogger: logger,\n\t\tserviceName: serviceName,\n\t\tamqpDSN: amqpDSN,\n\t\tconcurrentProcessingLimit: concurrentProcessingLimit,\n\t\thandler: handler,\n\t}\n\n\terr := processor.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn processor, nil\n}", "func NewConsumer(connector *connection.Connector, onChan OnChannel) *Consumer {\n\tfuncCh := make(chan OnChannel)\n\n\treturn &Consumer{\n\t\tconnector: connector,\n\t\tonChan: onChan,\n\t\tfuncCh: funcCh,\n\t}\n}", "func NewConsumer(opt Options) *Consumer {\n\n\treturn &Consumer{\n\t\topt: opt,\n\t}\n\n}", "func NewConsumer(ktraceController KtraceController, psnap ps.Snapshotter, hsnap handle.Snapshotter, config *config.Config) Consumer {\n\tkconsumer := &kstreamConsumer{\n\t\terrs: make(chan error, 
1000),\n\t\tconfig: config,\n\t\tpsnapshotter: psnap,\n\t\tktraceController: ktraceController,\n\t\tcapture: config.KcapFile != \"\",\n\t\tsequencer: kevent.NewSequencer(),\n\t\tkevts: make(chan *kevent.Kevent, 500),\n\t\trules: filter.NewRules(psnap, config),\n\t}\n\n\tkconsumer.interceptorChain = interceptors.NewChain(psnap, hsnap, config, kconsumer.enqueueKevent)\n\n\treturn kconsumer\n}", "func NewConsumer(client Client, config *ConsumerConfig, strategy Strategy) Consumer {\n\tvar metrics ConsumerMetrics = noOpConsumerMetrics\n\tif config.EnableMetrics {\n\t\tmetrics = NewKafkaConsumerMetrics(config.Group, config.ConsumerID)\n\t}\n\n\treturn &KafkaConsumer{\n\t\tconfig: config,\n\t\tclient: client,\n\t\tstrategy: strategy,\n\t\tmetrics: metrics,\n\t\tpartitionConsumers: make(map[string]map[int32]PartitionConsumer),\n\t\tpartitionConsumerFactory: NewPartitionConsumer,\n\t\tstopped: make(chan struct{}),\n\t}\n}", "func newBroker(brokers []string, cp ChainPartition) (Broker, error) {\n\tvar candidateBroker, connectedBroker, leaderBroker *sarama.Broker\n\n\t// Connect to one of the given brokers\n\tfor _, hostPort := range brokers {\n\t\tcandidateBroker = sarama.NewBroker(hostPort)\n\t\tif err := candidateBroker.Open(nil); err != nil {\n\t\t\tlogger.Warningf(\"Failed to connect to broker %s: %s\", hostPort, err)\n\t\t\tcontinue\n\t\t}\n\t\tif connected, err := candidateBroker.Connected(); !connected {\n\t\t\tlogger.Warningf(\"Failed to connect to broker %s: %s\", hostPort, err)\n\t\t\tcontinue\n\t\t}\n\t\tconnectedBroker = candidateBroker\n\t\tbreak\n\t}\n\n\tif connectedBroker == nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to any of the given brokers (%v) for metadata request\", brokers)\n\t}\n\tlogger.Debugf(\"Connected to broker %s\", connectedBroker.Addr())\n\n\t// Get metadata for the topic that corresponds to this chain\n\tmetadata, err := connectedBroker.GetMetadata(&sarama.MetadataRequest{Topics: []string{cp.Topic()}})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get metadata for topic %s: %s\", cp, err)\n\t}\n\n\t// Get the leader broker for this chain partition\n\tif (cp.Partition() >= 0) && (cp.Partition() < int32(len(metadata.Topics[0].Partitions))) {\n\t\tleaderBrokerID := metadata.Topics[0].Partitions[cp.Partition()].Leader\n\t\t// ATTN: If we ever switch to more than one partition per topic, the message\n\t\t// below should be updated to print `cp` (i.e. Topic/Partition) instead of\n\t\t// `cp.Topic()`.\n\t\tlogger.Debugf(\"[channel: %s] Leading broker: %d\", cp.Topic(), leaderBrokerID)\n\t\tfor _, availableBroker := range metadata.Brokers {\n\t\t\tif availableBroker.ID() == leaderBrokerID {\n\t\t\t\tleaderBroker = availableBroker\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif leaderBroker == nil {\n\t\t// ATTN: If we ever switch to more than one partition per topic, the message\n\t\t// below should be updated to print `cp` (i.e. Topic/Partition) instead of\n\t\t// `cp.Topic()`.\n\t\treturn nil, fmt.Errorf(\"[channel: %s] cannot find leader\", cp.Topic())\n\t}\n\n\t// Connect to broker\n\tif err := leaderBroker.Open(nil); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to Kafka broker: %s\", err)\n\t}\n\tif connected, err := leaderBroker.Connected(); !connected {\n\t\treturn nil, fmt.Errorf(\"failed to connect to Kafka broker: %s\", err)\n\t}\n\n\treturn &brokerImpl{broker: leaderBroker}, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InnerJoin selects records that have matching values in both tables. The left datatable is used as the reference datatable. InnerJoin transforms an expr column into a raw column
func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute() }
[ "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func TestDb_InnerJoin(t *testing.T) {\n\tsql := engine.Select(\"a.runoob_id\", \"a.runoob_author\", \"b.runoob_count\").\n\t\tFrom(\" runoob_tbl a\").\n\t\tInnerJoin(\"tcount_tbl b\").On(\"a.runoob_author=\").And(\"a=\", \"b=\").String()\n\tt.Log(sql)\n}", "func (sb *SelectStatementBuilder) JoinEq(table string, expr1, expr2 *pb.Expr) *SelectStatementBuilder {\n\tif sb.err != nil {\n\t\treturn sb\n\t}\n\teq := newBinaryExpression(expr1, expr2, pb.BinaryOp_EQ)\n\treturn sb.AddJoin(table, eq)\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func NewInnerJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\", filter...)\n}", "func NewInnerJoin(table string) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\")\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func (q UpdateQuery) Join(table Table, predicate Predicate, predicates ...Predicate) UpdateQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeInner,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}", "func ExtractJoinEqualityFilter(\n\tleftCol, rightCol opt.ColumnID, leftCols, rightCols opt.ColSet, on FiltersExpr,\n) FiltersItem {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif left == leftCol && right == rightCol {\n\t\t\treturn on[i]\n\t\t}\n\t}\n\tpanic(errors.AssertionFailedf(\"could not find equality between columns %d and %d in filters %s\",\n\t\tleftCol, rightCol, on.String(),\n\t))\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", 
"func ExtractJoinEquality(\n\tleftCols, rightCols opt.ColSet, condition opt.ScalarExpr,\n) (ok bool, left, right opt.ColumnID) {\n\tlvar, rvar, ok := isVarEquality(condition)\n\tif !ok {\n\t\treturn false, 0, 0\n\t}\n\n\t// Don't allow mixed types (see #22519).\n\tif !lvar.DataType().Equivalent(rvar.DataType()) {\n\t\treturn false, 0, 0\n\t}\n\n\tif leftCols.Contains(lvar.Col) && rightCols.Contains(rvar.Col) {\n\t\treturn true, lvar.Col, rvar.Col\n\t}\n\tif leftCols.Contains(rvar.Col) && rightCols.Contains(lvar.Col) {\n\t\treturn true, rvar.Col, lvar.Col\n\t}\n\n\treturn false, 0, 0\n}", "func (r1 *sqlTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\t// TODO(jonlawlor): if both r1 and r2 are on the same server, pass it\n\t// through to the source database.\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func ExtractJoinEqualityColumns(\n\tleftCols, rightCols opt.ColSet, on FiltersExpr,\n) (leftEq opt.ColList, rightEq opt.ColList) {\n\tfor i := range on {\n\t\tcondition := on[i].Condition\n\t\tok, left, right := ExtractJoinEquality(leftCols, rightCols, condition)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Don't allow any column to show up twice.\n\t\t// TODO(radu): need to figure out the right thing to do in cases\n\t\t// like: left.a = right.a AND left.a = right.b\n\t\tduplicate := false\n\t\tfor i := range leftEq {\n\t\t\tif leftEq[i] == left || rightEq[i] == right {\n\t\t\t\tduplicate = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !duplicate {\n\t\t\tleftEq = append(leftEq, left)\n\t\t\trightEq = append(rightEq, right)\n\t\t}\n\t}\n\treturn leftEq, rightEq\n}", "func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}", "func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session {\n\tsession.Statement.Join(joinOperator, tablename, condition, args...)\n\treturn session\n}", "func JoinWith(handler *model.JoinTableHandler, ne *engine.Engine, source interface{}) error {\n\tne.Scope.ContextValue(source)\n\ttableName := handler.TableName\n\tquotedTableName := Quote(ne, tableName)\n\tvar joinConditions []string\n\tm, err := GetModelStruct(ne, source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif handler.Source.ModelType == m.ModelType {\n\t\td := reflect.New(handler.Destination.ModelType).Interface()\n\t\tdestinationTableName := QuotedTableName(ne, d)\n\t\tfor _, foreignKey := range handler.Destination.ForeignKeys {\n\t\t\tjoinConditions = append(joinConditions, fmt.Sprintf(\"%v.%v = %v.%v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tQuote(ne, foreignKey.DBName),\n\t\t\t\tdestinationTableName,\n\t\t\t\tQuote(ne, foreignKey.AssociationDBName)))\n\t\t}\n\n\t\tvar foreignDBNames []string\n\t\tvar foreignFieldNames []string\n\t\tfor _, foreignKey := range handler.Source.ForeignKeys {\n\t\t\tforeignDBNames = append(foreignDBNames, foreignKey.DBName)\n\t\t\tif field, ok := FieldByName(ne, source, foreignKey.AssociationDBName); ok == nil {\n\t\t\t\tforeignFieldNames = append(foreignFieldNames, field.Name)\n\t\t\t}\n\t\t}\n\n\t\tforeignFieldValues := util.ColumnAsArray(foreignFieldNames, ne.Scope.ValueOf())\n\n\t\tvar condString string\n\t\tif len(foreignFieldValues) > 0 {\n\t\t\tvar quotedForeignDBNames 
[]string\n\t\t\tfor _, dbName := range foreignDBNames {\n\t\t\t\tquotedForeignDBNames = append(quotedForeignDBNames, tableName+\".\"+dbName)\n\t\t\t}\n\n\t\t\tcondString = fmt.Sprintf(\"%v IN (%v)\",\n\t\t\t\tToQueryCondition(ne, quotedForeignDBNames),\n\t\t\t\tutil.ToQueryMarks(foreignFieldValues))\n\t\t} else {\n\t\t\tcondString = fmt.Sprintf(\"1 <> 1\")\n\t\t}\n\n\t\tsearch.Join(ne,\n\t\t\tfmt.Sprintf(\"INNER JOIN %v ON %v\",\n\t\t\t\tquotedTableName,\n\t\t\t\tstrings.Join(joinConditions, \" AND \")))\n\t\tsearch.Where(ne, condString, util.ToQueryValues(foreignFieldValues)...)\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong source type for join table handler\")\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
InnerJoin selects records that have matching values in both tables. tables[0] is used as the reference datatable.
func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(innerJoin, tables, on).Compute() }
[ "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func TestDb_InnerJoin(t *testing.T) {\n\tsql := engine.Select(\"a.runoob_id\", \"a.runoob_author\", \"b.runoob_count\").\n\t\tFrom(\" runoob_tbl a\").\n\t\tInnerJoin(\"tcount_tbl b\").On(\"a.runoob_author=\").And(\"a=\", \"b=\").String()\n\tt.Log(sql)\n}", "func NewInnerJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"INNER JOIN\", table, from, to, filter...)\n}", "func NewInnerJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\", filter...)\n}", "func NewInnerJoin(table string) JoinQuery {\n\treturn NewInnerJoinOn(table, \"\", \"\")\n}", "func (sd *SelectDataset) joinTable(join exp.JoinExpression) *SelectDataset {\n\treturn sd.copy(sd.clauses.JoinsAppend(join))\n}", "func (self Accessor) InnerJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).InnerJoin(expr)\n}", "func (a joinedTable) equal(b joinedTable) bool {\n\treturn a.secondaryTable == b.secondaryTable && a.primaryColumn == b.primaryColumn && a.secondaryColumn == b.secondaryColumn\n}", "func (q UpdateQuery) Join(table Table, predicate Predicate, predicates ...Predicate) UpdateQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeInner,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (r1 *sqlTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\t// TODO(jonlawlor): if both r1 and r2 are on the same server, pass it\n\t// through to the source database.\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (params *ListParams) AddInnerJoin(tableName string, onStatement string) {\n\tparams.AddJoin(tableName, onStatement, JoinInner)\n}", "func (w *Wrapper) JoinWhere(table interface{}, args ...interface{}) *Wrapper {\n\tw.saveJoinCondition(\"AND\", table, args...)\n\treturn w\n}", "func (sb *SelectStatementBuilder) JoinEq(table string, expr1, expr2 *pb.Expr) *SelectStatementBuilder {\n\tif sb.err != nil {\n\t\treturn sb\n\t}\n\teq := newBinaryExpression(expr1, expr2, pb.BinaryOp_EQ)\n\treturn sb.AddJoin(table, eq)\n}", "func (q Query) Join(inner Query,\n\touterKeySelector func(interface{}) interface{},\n\tinnerKeySelector func(interface{}) interface{},\n\tresultSelector func(outer interface{}, inner interface{}) interface{}) Query {\n\n\treturn Query{\n\t\tIterate: func() Iterator {\n\t\t\touternext := q.Iterate()\n\t\t\tinnernext := inner.Iterate()\n\n\t\t\tinnerLookup := make(map[interface{}][]interface{})\n\t\t\tfor innerItem, ok := innernext(); ok; innerItem, ok = innernext() {\n\t\t\t\tinnerKey := innerKeySelector(innerItem)\n\t\t\t\tinnerLookup[innerKey] = append(innerLookup[innerKey], innerItem)\n\t\t\t}\n\n\t\t\tvar outerItem interface{}\n\t\t\tvar innerGroup []interface{}\n\t\t\tinnerLen, innerIndex := 0, 0\n\n\t\t\treturn func() (item interface{}, ok bool) {\n\t\t\t\tif innerIndex >= innerLen {\n\t\t\t\t\thas := false\n\t\t\t\t\tfor !has {\n\t\t\t\t\t\touterItem, ok = outernext()\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tinnerGroup, has = innerLookup[outerKeySelector(outerItem)]\n\t\t\t\t\t\tinnerLen = 
len(innerGroup)\n\t\t\t\t\t\tinnerIndex = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\titem = resultSelector(outerItem, innerGroup[innerIndex])\n\t\t\t\tinnerIndex++\n\t\t\t\treturn item, true\n\t\t\t}\n\t\t},\n\t}\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (set PodSet) Join(set2 PodSet) PodSet {\n\tfor podID := range set2 {\n\t\tset.Add(podID)\n\t}\n\treturn set\n}", "func (s *BaseMySqlParserListener) EnterInnerJoin(ctx *InnerJoinContext) {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LeftJoin the tables. tables[0] is used as the reference DataTable.
func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(leftJoin, tables, on).Compute() }
[ "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func (t *Table) LeftJoin(offset int32, count int, crit string, target interface{}) error {\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\tif err == nil {\n\t\terr = json.Unmarshal(body, &target)\n\t}\n\treturn err\n}", "func (dba *Sqler) LeftJoin(args ...interface{}) *Sqler {\n\t//dba.parseJoin(args, \"LEFT\")\n\tdba.join = append(dba.join, []interface{}{\"LEFT\", args})\n\n\treturn dba\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func (q UpdateQuery) LeftJoin(table Table, predicate Predicate, predicates ...Predicate) UpdateQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeLeft,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}", "func (jn *Join) LeftTbl() *TblSelector {\n\treturn jn.leftTbl\n}", "func NewLeftJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to, filter...)\n}", "func (sd *SelectDataset) NaturalLeftJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalLeftJoinType, table))\n}", "func NewLeftJoinOn(table string, from string, to string) JoinQuery {\n\treturn NewJoinWith(\"LEFT JOIN\", table, from, to)\n}", "func (t *Table) LeftJoinRaw(offset int32, count int, crit string) ([]byte, error) {\n\tp := \"https://%s/api/getLeftJoin.sjs?json&object=%s&limit=%d,%d\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, offset, count)\n\tif len(crit) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(crit)\n\t}\n\t_, body, err := t.Get(x)\n\treturn body, err\n}", "func NewLeftJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\", filter...)\n}", "func NewLeftJoin(table string) JoinQuery {\n\treturn NewLeftJoinOn(table, \"\", \"\")\n}", "func (t *Table) LeftJoinMap(offset int32, count int, crit string) ([]map[string]string, error) {\n\tvar a []map[string]string\n\tbody, err := t.LeftJoinRaw(offset, count, crit)\n\ta = unpackGJsonArray(body)\n\treturn a, err\n}", "func (params *ListParams) AddLeftJoin(tableName string, onStatement string) {\n\tparams.AddJoin(tableName, onStatement, JoinLeft)\n}", "func (f *predicateSqlizerFactory) createLeftJoin(secondaryTable string, primaryColumn string, secondaryColumn string) string {\n\tnewAlias := joinedTable{secondaryTable, primaryColumn, secondaryColumn}\n\tfor i, alias := range f.joinedTables {\n\t\tif alias.equal(newAlias) {\n\t\t\treturn f.aliasName(secondaryTable, i)\n\t\t}\n\t}\n\n\tf.joinedTables = append(f.joinedTables, newAlias)\n\treturn f.aliasName(secondaryTable, len(f.joinedTables)-1)\n}", "func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(outerJoin, tables, on).Compute()\n}", "func (mp *JoinMultiplicity) JoinPreservesLeftRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.SemiJoinOp:\n\t\tbreak\n\n\tcase opt.LeftJoinOp, opt.FullJoinOp:\n\t\treturn true\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllLeftRows()\n}", "func LeftFromTable(timeTable map[string]map[rune][]uint) (ret []uint) 
{\n\tfor _, day := range timeTable {\n\t\tfor _, ids := range day {\n\t\t\tfor _, id := range ids {\n\t\t\t\tret = append(ret, id)\n\t\t\t}\n\t\t}\n\t}\n\treturn SliceUniqMap(ret)\n}", "func LeftJoinFloat64(lx, rx []float64) []float64 {\n\tresult := make([]float64, 0, len(lx))\n\trhash := hashSliceFloat64(rx)\n\n\tfor _, v := range lx {\n\t\t_, ok := rhash[v]\n\t\tif !ok {\n\t\t\tresult = append(result, v)\n\t\t}\n\t}\n\treturn result\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RightJoin the tables. tables[0] is used as the reference DataTable.
func RightJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(rightJoin, tables, on).Compute() }
[ "func (stmt *statement) RightJoin(table, on string) Statement {\n\tstmt.join(\"RIGHT JOIN \", table, on)\n\treturn stmt\n}", "func (w *Wrapper) RightJoin(table interface{}, condition string) *Wrapper {\n\tw.saveJoin(table, \"RIGHT JOIN\", condition)\n\treturn w\n}", "func (dba *Sqler) RightJoin(args ...interface{}) *Sqler {\n\t//dba.parseJoin(args, \"RIGHT\")\n\tdba.join = append(dba.join, []interface{}{\"RIGHT\", args})\n\n\treturn dba\n}", "func (q UpdateQuery) RightJoin(table Table, predicate Predicate, predicates ...Predicate) UpdateQuery {\n\tpredicates = append([]Predicate{predicate}, predicates...)\n\tq.JoinTables = append(q.JoinTables, JoinTable{\n\t\tJoinType: JoinTypeRight,\n\t\tTable: table,\n\t\tOnPredicates: VariadicPredicate{\n\t\t\tPredicates: predicates,\n\t\t},\n\t})\n\treturn q\n}", "func (jn *Join) RightTbl() *TblSelector {\n\treturn jn.rightTbl\n}", "func RightOuterJoin(clause string, args ...interface{}) QueryMod {\n\treturn rightOuterJoinQueryMod{\n\t\tclause: clause,\n\t\targs: args,\n\t}\n}", "func NewRightJoinOn(table string, from string, to string, filter ...FilterQuery) JoinQuery {\n\treturn NewJoinWith(\"RIGHT JOIN\", table, from, to, filter...)\n}", "func (sd *SelectDataset) NaturalRightJoin(table exp.Expression) *SelectDataset {\n\treturn sd.joinTable(exp.NewUnConditionedJoinExpression(exp.NaturalRightJoinType, table))\n}", "func NewRightJoin(table string) JoinQuery {\n\treturn NewRightJoinOn(table, \"\", \"\")\n}", "func NewRightJoin(table string, filter ...FilterQuery) JoinQuery {\n\treturn NewRightJoinOn(table, \"\", \"\", filter...)\n}", "func (params *ListParams) AddRightJoin(tableName string, onStatement string) {\n\tparams.AddJoin(tableName, onStatement, JoinRight)\n}", "func (dr *DataRow) joinOnColumnIndexRight(rightRow DataRow, onColumnIndexRight int) DataRow {\n\toutItems := make([]DataItem, 0, len(dr.Items)+len(rightRow.Items)-1)\n\t// append left row\n\toutItems = append(outItems, dr.Items...)\n\t// append right row except on column\n\toutItems = append(outItems, rightRow.Items[:onColumnIndexRight]...)\n\toutItems = append(outItems, rightRow.Items[onColumnIndexRight+1:]...)\n\n\treturn DataRow{\n\t\tItems: outItems,\n\t}\n}", "func RightJoinInt64(lx, rx []int64) []int64 { return LeftJoinInt64(rx, lx) }", "func (mp *JoinMultiplicity) JoinPreservesRightRows(op opt.Operator) bool {\n\tswitch op {\n\tcase opt.InnerJoinOp, opt.LeftJoinOp:\n\t\tbreak\n\n\tcase opt.FullJoinOp:\n\t\treturn true\n\n\tcase opt.SemiJoinOp:\n\t\tpanic(errors.AssertionFailedf(\"right rows are not included in the output of a %v\", op))\n\n\tdefault:\n\t\tpanic(errors.AssertionFailedf(\"unsupported operator: %v\", op))\n\t}\n\treturn mp.JoinFiltersMatchAllRightRows()\n}", "func (i *mergeJoinIter) nullifyRightRow(r sql.Row) sql.Row {\n\tfor j := i.scopeLen + i.parentLen + i.leftRowLen; j < len(r); j++ {\n\t\tr[j] = nil\n\t}\n\treturn r\n}", "func (r1 *sqlTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\t// TODO(jonlawlor): if both r1 and r2 are on the same server, pass it\n\t// through to the source database.\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func (hj *hashJoiner) emitRight(matched bool) {\n\t// Make sure that hj.probeState.buildIdx is of sufficient size (it is used\n\t// as a selection vector to select only the necessary tuples).\n\tbuildIdxSize := hj.ht.Vals.Length() - hj.emittingRightState.rowIdx\n\tif buildIdxSize > 
coldata.BatchSize() {\n\t\tbuildIdxSize = coldata.BatchSize()\n\t}\n\tif cap(hj.probeState.buildIdx) < buildIdxSize {\n\t\thj.probeState.buildIdx = make([]int, buildIdxSize)\n\t} else {\n\t\thj.probeState.buildIdx = hj.probeState.buildIdx[:buildIdxSize]\n\t}\n\n\t// Find the next batch of tuples that have the requested 'matched' value.\n\tnResults := 0\n\tfor nResults < coldata.BatchSize() && hj.emittingRightState.rowIdx < hj.ht.Vals.Length() {\n\t\tif hj.probeState.buildRowMatched[hj.emittingRightState.rowIdx] == matched {\n\t\t\thj.probeState.buildIdx[nResults] = hj.emittingRightState.rowIdx\n\t\t\tnResults++\n\t\t}\n\t\thj.emittingRightState.rowIdx++\n\t}\n\thj.resetOutput(nResults)\n\n\t// We have already fully built the hash table from the right input and now\n\t// are only populating output one batch at a time. If we were to use a\n\t// limited allocator, we could hit the limit here, and it would have been\n\t// very hard to fall back to disk backed hash joiner because we might have\n\t// already emitted partial output.\n\thj.outputUnlimitedAllocator.PerformOperation(hj.output.ColVecs(), func() {\n\t\tvar rightOutColOffset int\n\t\tif hj.spec.JoinType.ShouldIncludeLeftColsInOutput() {\n\t\t\t// Set all elements in the probe columns of the output batch to null.\n\t\t\tfor i := range hj.spec.Left.SourceTypes {\n\t\t\t\toutCol := hj.output.ColVec(i)\n\t\t\t\toutCol.Nulls().SetNullRange(0 /* startIdx */, nResults)\n\t\t\t}\n\t\t\trightOutColOffset = len(hj.spec.Left.SourceTypes)\n\t\t}\n\n\t\toutCols := hj.output.ColVecs()[rightOutColOffset : rightOutColOffset+len(hj.spec.Right.SourceTypes)]\n\t\tfor i := range hj.spec.Right.SourceTypes {\n\t\t\toutCol := outCols[i]\n\t\t\tvalCol := hj.ht.Vals.ColVec(i)\n\t\t\toutCol.Copy(\n\t\t\t\tcoldata.CopySliceArgs{\n\t\t\t\t\tSliceArgs: coldata.SliceArgs{\n\t\t\t\t\t\tSrc: valCol,\n\t\t\t\t\t\tSrcEndIdx: nResults,\n\t\t\t\t\t\tSel: hj.probeState.buildIdx,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\n\t\thj.output.SetLength(nResults)\n\t})\n}", "func (b *Builder) FullJoin(joinTable string, joinCond interface{}) *Builder {\r\n\treturn b.Join(\"FULL\", joinTable, joinCond)\r\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
OuterJoin the tables. tables[0] is used as the reference DataTable.
func OuterJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) { return newJoinImpl(outerJoin, tables, on).Compute() }
[ "func (mySelf SQLJoin) Outer() SQLJoin {\n\tmySelf.outer = true\n\treturn mySelf\n}", "func (sd *SelectDataset) FullOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.FullOuterJoinType, table, condition))\n}", "func (s *SelectStmt) OuterJoin(right interface{}, condition interface{}) *SelectStmt {\n\ts.Joins = append(s.Joins, JoinStmt{Type: OuterJoin, Right: right, Condition: condition})\n\treturn s\n}", "func (self Accessor) OuterJoin(expr interface{}) *SelectManager {\n\treturn self.From(self.Relation()).OuterJoin(expr)\n}", "func (sd *SelectDataset) LeftOuterJoin(table exp.Expression, condition exp.JoinCondition) *SelectDataset {\n\treturn sd.joinTable(exp.NewConditionedJoinExpression(exp.LeftOuterJoinType, table, condition))\n}", "func FullOuterJoin(clause string, args ...interface{}) QueryMod {\n\treturn fullOuterJoinQueryMod{\n\t\tclause: clause,\n\t\targs: args,\n\t}\n}", "func InnerJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, tables, on).Compute()\n}", "func (stmt *statement) FullJoin(table, on string) Statement {\n\tstmt.join(\"FULL JOIN \", table, on)\n\treturn stmt\n}", "func LeftJoin(tables []*DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(leftJoin, tables, on).Compute()\n}", "func (s *BaseMySqlParserListener) EnterOuterJoin(ctx *OuterJoinContext) {}", "func (s *BasePlSqlParserListener) EnterOuter_join_sign(ctx *Outer_join_signContext) {}", "func (left *DataTable) InnerJoin(right *DataTable, on []JoinOn) (*DataTable, error) {\n\treturn newJoinImpl(innerJoin, []*DataTable{left, right}, on).Compute()\n}", "func OuterJoinString(lx, rx []string) []string {\n\tljoin := LeftJoinString(lx, rx)\n\trjoin := RightJoinString(lx, rx)\n\n\tresult := make([]string, len(ljoin)+len(rjoin))\n\tcopy(result, ljoin)\n\tfor i, v := range rjoin {\n\t\tresult[len(ljoin)+i] = v\n\t}\n\treturn result\n}", "func (stmt *statement) LeftJoin(table, on string) Statement {\n\tstmt.join(\"LEFT JOIN \", table, on)\n\treturn stmt\n}", "func Join(schema, table string) string {\n\treturn schema + \".\" + table\n}", "func outerJoinSimplify(p *Join, predicates []expression.Expression) error {\n\tvar innerTable, outerTable LogicalPlan\n\tchild1 := p.GetChildByIndex(0).(LogicalPlan)\n\tchild2 := p.GetChildByIndex(1).(LogicalPlan)\n\tvar fullConditions []expression.Expression\n\tif p.JoinType == LeftOuterJoin {\n\t\tinnerTable = child2\n\t\touterTable = child1\n\t} else if p.JoinType == RightOuterJoin || p.JoinType == InnerJoin {\n\t\tinnerTable = child1\n\t\touterTable = child2\n\t} else {\n\t\treturn nil\n\t}\n\t// first simplify embedded outer join.\n\t// When trying to simplify an embedded outer join operation in a query,\n\t// we must take into account the join condition for the embedding outer join together with the WHERE condition.\n\tif innerPlan, ok := innerTable.(*Join); ok {\n\t\tfullConditions = concatOnAndWhereConds(p, predicates)\n\t\terr := outerJoinSimplify(innerPlan, fullConditions)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t}\n\tif outerPlan, ok := outerTable.(*Join); ok {\n\t\tif fullConditions != nil {\n\t\t\tfullConditions = concatOnAndWhereConds(p, predicates)\n\t\t}\n\t\terr := outerJoinSimplify(outerPlan, fullConditions)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t}\n\tif p.JoinType == InnerJoin {\n\t\treturn nil\n\t}\n\t// then simplify embedding outer join.\n\tcanBeSimplified := false\n\tfor _, expr 
:= range predicates {\n\t\tisOk, err := isNullRejected(innerTable.GetSchema(), expr)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tif isOk {\n\t\t\tcanBeSimplified = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif canBeSimplified {\n\t\tp.JoinType = InnerJoin\n\t}\n\treturn nil\n}", "func (s *BasePlSqlParserListener) EnterOuter_join_type(ctx *Outer_join_typeContext) {}", "func (r1 *sqlTable) Join(r2 rel.Relation, zero interface{}) rel.Relation {\n\t// TODO(jonlawlor): if both r1 and r2 are on the same server, pass it\n\t// through to the source database.\n\treturn rel.NewJoin(r1, r2, zero)\n}", "func OuterJoinFloat64(lx, rx []float64) []float64 {\n\tljoin := LeftJoinFloat64(lx, rx)\n\trjoin := RightJoinFloat64(lx, rx)\n\n\tresult := make([]float64, len(ljoin)+len(rjoin))\n\tcopy(result, ljoin)\n\tfor i, v := range rjoin {\n\t\tresult[len(ljoin)+i] = v\n\t}\n\treturn result\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetOrders retrieves paginated orders from the Mesh DB at a specific snapshot in time. Passing an empty string as `snapshotID` creates a new snapshot and returns the first set of results. To fetch all orders, continue to make requests supplying the `snapshotID` returned from the first request. After 1 minute without further requests referencing a specific snapshot, the snapshot expires and can no longer be used.
func (app *App) GetOrders(page, perPage int, snapshotID string) (*rpc.GetOrdersResponse, error) { ordersInfos := []*zeroex.AcceptedOrderInfo{} if perPage <= 0 { return &rpc.GetOrdersResponse{ OrdersInfos: ordersInfos, SnapshotID: snapshotID, }, nil } var snapshot *db.Snapshot if snapshotID == "" { // Create a new snapshot snapshotID = uuid.New().String() var err error snapshot, err = app.db.Orders.GetSnapshot() if err != nil { return nil, err } expirationTimestamp := time.Now().Add(1 * time.Minute) app.snapshotExpirationWatcher.Add(expirationTimestamp, snapshotID) app.muIdToSnapshotInfo.Lock() app.idToSnapshotInfo[snapshotID] = snapshotInfo{ Snapshot: snapshot, ExpirationTimestamp: expirationTimestamp, } app.muIdToSnapshotInfo.Unlock() } else { // Try and find an existing snapshot app.muIdToSnapshotInfo.Lock() info, ok := app.idToSnapshotInfo[snapshotID] if !ok { app.muIdToSnapshotInfo.Unlock() return nil, ErrSnapshotNotFound{id: snapshotID} } snapshot = info.Snapshot // Reset the snapshot's expiry app.snapshotExpirationWatcher.Remove(info.ExpirationTimestamp, snapshotID) expirationTimestamp := time.Now().Add(1 * time.Minute) app.snapshotExpirationWatcher.Add(expirationTimestamp, snapshotID) app.idToSnapshotInfo[snapshotID] = snapshotInfo{ Snapshot: snapshot, ExpirationTimestamp: expirationTimestamp, } app.muIdToSnapshotInfo.Unlock() } notRemovedFilter := app.db.Orders.IsRemovedIndex.ValueFilter([]byte{0}) var selectedOrders []*meshdb.Order err := snapshot.NewQuery(notRemovedFilter).Offset(page * perPage).Max(perPage).Run(&selectedOrders) if err != nil { return nil, err } for _, order := range selectedOrders { ordersInfos = append(ordersInfos, &zeroex.AcceptedOrderInfo{ OrderHash: order.Hash, SignedOrder: order.SignedOrder, FillableTakerAssetAmount: order.FillableTakerAssetAmount, }) } getOrdersResponse := &rpc.GetOrdersResponse{ SnapshotID: snapshotID, OrdersInfos: ordersInfos, } return getOrdersResponse, nil }
[ "func (handler *rpcHandler) GetOrders(page, perPage int, snapshotID string) (result *types.GetOrdersResponse, err error) {\n\tlog.WithFields(map[string]interface{}{\n\t\t\"page\": page,\n\t\t\"perPage\": perPage,\n\t\t\"snapshotID\": snapshotID,\n\t}).Debug(\"received GetOrders request via RPC\")\n\t// Catch panics, log stack trace and return RPC error message\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tinternalErr, ok := r.(error)\n\t\t\tif !ok {\n\t\t\t\t// If r is not of type error, convert it.\n\t\t\t\tinternalErr = fmt.Errorf(\"Recovered from non-error: (%T) %v\", r, r)\n\t\t\t}\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": internalErr,\n\t\t\t\t\"method\": \"GetOrders\",\n\t\t\t\t\"stackTrace\": string(debug.Stack()),\n\t\t\t}).Error(\"RPC method handler crashed\")\n\t\t\terr = errors.New(\"method handler crashed in GetOrders RPC call (check logs for stack trace)\")\n\t\t}\n\t}()\n\tgetOrdersResponse, err := handler.app.GetOrders(page, perPage, snapshotID)\n\tif err != nil {\n\t\tif _, ok := err.(core.ErrSnapshotNotFound); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, ok := err.(core.ErrPerPageZero); ok {\n\t\t\treturn nil, err\n\t\t}\n\t\t// We don't want to leak internal error details to the RPC client.\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in GetOrders RPC call\")\n\t\treturn nil, constants.ErrInternal\n\t}\n\treturn getOrdersResponse, nil\n}", "func (s *LazadaClient) GetOrders(params GetOrdersParams) (*GetOrdersResponse, error) {\n\turi := getOrdersURI\n\n\tt := time.Now()\n\n\trequest, _ := http.NewRequest(cgetMethod, apiEndpoint+uri, nil)\n\tq := request.URL.Query()\n\tq.Add(cappKey, s.appKey)\n\tq.Add(ctimestamp, fmt.Sprintf(\"%d000\", t.Unix()))\n\tq.Add(caccessToken, s.accessToken)\n\tq.Add(csignMethod, csha256)\n\tif params.CreatedAfter != \"\" {\n\t\tq.Add(ccreatedAfter, params.CreatedAfter)\n\t}\n\tif params.CreatedBefore != \"\" {\n\t\tq.Add(ccreatedBefore, params.CreatedBefore)\n\t}\n\tif params.UpdatedAfter != \"\" {\n\t\tq.Add(cupdatedAfter, params.UpdatedAfter)\n\t}\n\tif params.UpdatedBefore != \"\" {\n\t\tq.Add(cupdatedBefore, params.UpdatedBefore)\n\t}\n\n\tq.Add(climit, params.Limit)\n\tq.Add(coffset, params.Offset)\n\n\tif params.SortBy != \"\" {\n\t\tq.Add(sortBy, params.SortBy)\n\t}\n\n\tsignString := sign(q, uri, s.appSecret)\n\tq.Add(csign, signString)\n\n\trequest.URL.RawQuery = q.Encode()\n\n\tlog.Println(\"[Client] query orders:\", request.URL.String())\n\n\tresponse, err := s.client.Do(request)\n\tif err != nil {\n\t\tlog.Println(\"[Client] query failed with reason:\", err.Error())\n\t}\n\tdefer response.Body.Close()\n\n\tbuf, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Println(\"[Client] read raw body failed with reason:\", err)\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"[Client] body raw response: %s\\n\", string(buf))\n\n\tvar res GetOrdersResponse\n\tif err = json.Unmarshal(buf, &res); err != nil {\n\t\tlog.Printf(\"[Client] decode error: %s\\n\", err.Error())\n\t\treturn nil, err\n\t}\n\n\tif res.Code == \"0\" {\n\t\treturn &res, nil\n\t}\n\n\tif res.Code == \"IllegalAccessToken\" {\n\t\treturn nil, ErrTokenExpired\n\t}\n\n\treturn nil, fmt.Errorf(\"code is not success : %s\", res.Code)\n}", "func GetOrders(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\n\tif id == \"\" {\n\t\terrors.ErrRequiredParam(c.Writer, http.StatusBadRequest, \"order id is required\")\n\t\treturn\n\t}\n\n\torder, err := s.client.GetOrder(id)\n\tif err != nil {\n\t\ts.l.Printf(\"failed to request 
order information: %s\\n\", err)\n\t\treturn\n\t}\n\n\tmodels.Respond(c.Writer, order)\n\treturn\n}", "func GetOrders() (orders []Orders, err error) {\r\n\tvar rows *sql.Rows\r\n\tif rows, err = Get(`select * from orders where deleted_at is null order by created_at desc;`); err != nil {\r\n\t\tCheckError(\"Error getting Orders.\", err, false)\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tdefer rows.Close()\r\n\tfor rows.Next() {\r\n\t\torder := Orders{}\r\n\t\tif err = rows.Scan(&order.ID, &order.DocEntry, &order.DocNum, &order.Canceled, &order.CardCode, &order.CardName, &order.VatSum, &order.DocTotal, &order.Synced, &order.CreatedBy, &order.CreatedAt, &order.UpdatedAt, &order.DeletedAt, &order.Comment, &order.Returned, &order.DiscountApprovedBy); err != nil {\r\n\t\t\tCheckError(\"Error Scanning Orders.\", err, false)\r\n\t\t} else {\r\n\t\t\torders = append(orders, order)\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}", "func (c *DefaultApiController) GetOrders(w http.ResponseWriter, r *http.Request) { \n\tquery := r.URL.Query()\n\torderId := query.Get(\"order_id\")\n\tresult, err := c.service.GetOrders(r.Context(), orderId)\n\t//If an error occured, encode the error with the status code\n\tif err != nil {\n\t\tEncodeJSONResponse(err.Error(), &result.Code, w)\n\t\treturn\n\t}\n\t//If no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, w)\n\t\n}", "func (driver *Driver) GetSnapshot(volumeID, snapshotID, snapshotName string) ([]*storagedriver.Snapshot, error) {\n\tvar snapshotsInt []*storagedriver.Snapshot\n\tif volumeID != \"\" {\n\t\tvolumes, err := driver.getVolume(volumeID, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, volume := range volumes {\n\t\t\tfor _, destSnap := range volume.DestSnapList {\n\t\t\t\tsnapshot, err := driver.getSnapshot(strconv.Itoa(int(destSnap.([]interface{})[2].(float64))), \"\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\tvolSize, _ := strconv.Atoi(volume.VolSize)\n\t\t\t\tsnapshotSD := &storagedriver.Snapshot{\n\t\t\t\t\tName: snapshot[0].Name,\n\t\t\t\t\tVolumeID: strconv.Itoa(volume.Index),\n\t\t\t\t\tSnapshotID: strconv.Itoa(snapshot[0].Index),\n\t\t\t\t\tVolumeSize: strconv.Itoa(volSize / 1024 / 1024),\n\t\t\t\t\tStartTime: snapshot[0].CreationTime,\n\t\t\t\t\tDescription: \"\",\n\t\t\t\t\tStatus: \"\",\n\t\t\t\t}\n\t\t\t\tsnapshotsInt = append(snapshotsInt, snapshotSD)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tsnapshots, err := driver.getSnapshot(snapshotID, snapshotName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, snapshot := range snapshots {\n\t\t\tsnapshot, err := goxtremio.GetSnapshot(strconv.Itoa(snapshot.Index), \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvolume, err := driver.getVolume(strconv.Itoa(int(snapshot.AncestorVolID[2].(float64))), \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvolSize, _ := strconv.Atoi(volume[0].VolSize)\n\t\t\tsnapshotSD := &storagedriver.Snapshot{\n\t\t\t\tName: snapshot.Name,\n\t\t\t\tVolumeID: strconv.Itoa(int(snapshot.AncestorVolID[2].(float64))),\n\t\t\t\tSnapshotID: strconv.Itoa(snapshot.Index),\n\t\t\t\tVolumeSize: strconv.Itoa(volSize / 1024 / 1024),\n\t\t\t\tStartTime: snapshot.CreationTime,\n\t\t\t\tDescription: \"\",\n\t\t\t\tStatus: \"\",\n\t\t\t}\n\t\t\tsnapshotsInt = append(snapshotsInt, snapshotSD)\n\t\t}\n\n\t}\n\n\treturn snapshotsInt, nil\n}", "func (d *driver) GetSnapshot(\n\tvolumeID, snapshotID, snapshotName string) ([]*core.Snapshot, error) {\n\n\tvar 
snapshotsSD []*core.Snapshot\n\n\tif snapshotID != \"\" || snapshotName != \"\" {\n\t\tidInt, err := strconv.ParseInt(snapshotID, 10, 64)\n\t\tif err != nil {\n\t\t\tidInt = -1\n\t\t}\n\t\tsnapshot, err := d.client.GetSnapshot(idInt, snapshotName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif snapshot == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tvolumeName := d.client.NameFromPath(snapshot.Path)\n\t\tif volumeID != \"\" && volumeID != volumeName {\n\t\t\treturn nil, goof.New(fmt.Sprintf(\"Snapshot volume name does not match volumeID: Snapshot volume: (%s) volumeID: (%s)\", volumeName, volumeID))\n\t\t}\n\t\tsize, err := d.getSize(\"\", volumeName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsnapshotSD := &core.Snapshot{\n\t\t\tName: snapshot.Name,\n\t\t\tVolumeID: volumeName,\n\t\t\tSnapshotID: strconv.FormatInt(snapshot.Id, 10),\n\t\t\tVolumeSize: strconv.FormatInt(size, 10),\n\t\t\tStartTime: strconv.FormatInt(snapshot.Created, 10),\n\t\t\tDescription: \"\",\n\t\t\tStatus: snapshot.State,\n\t\t}\n\t\tsnapshotsSD = append(snapshotsSD, snapshotSD)\n\t} else if volumeID != \"\" {\n\n\t\tsnapshots, err := d.client.GetSnapshotsByPath(volumeID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif snapshots == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tfor _, snapshot := range snapshots {\n\t\t\tvolumeName := d.client.NameFromPath(snapshot.Path)\n\t\t\tsize, err := d.getSize(\"\", volumeName)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tsnapshotSD := &core.Snapshot{\n\t\t\t\tName: snapshot.Name,\n\t\t\t\tVolumeID: volumeName,\n\t\t\t\tSnapshotID: strconv.FormatInt(snapshot.Id, 10),\n\t\t\t\tVolumeSize: strconv.FormatInt(size, 10),\n\t\t\t\tStartTime: strconv.FormatInt(snapshot.Created, 10),\n\t\t\t\tDescription: \"\",\n\t\t\t\tStatus: snapshot.State,\n\t\t\t}\n\t\t\tsnapshotsSD = append(snapshotsSD, snapshotSD)\n\t\t}\n\t}\n\n\treturn snapshotsSD, nil\n}", "func GetOrders(db *sqlx.DB) gin.HandlerFunc {\n\n\treturn func(c *gin.Context) {\n\n\t\tvar user1 User\n\t\tuserName, exists := c.Get(\"user\")\n\t\tif !exists {\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get user\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\n\t\tdbErr := db.Get(&user1, \"SELECT * FROM gaea.user WHERE user_name=$1\", userName)\n\t\tif dbErr != nil {\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get user\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\n\t\tvar memberStatus bool\n\t\tswitch {\n\t\tcase user1.Role == \"nonmember\":\n\t\t\tmemberStatus = false\n\t\tdefault:\n\t\t\tmemberStatus = true\n\t\t}\n\n\t\tvar ords []Order\n\t\tvar retOrds []Order\n\t\tvar qtyOrd int\n\n\t\terr1 := db.Get(&qtyOrd, `SELECT COUNT(*) FROM gaea.order WHERE user_name=$1`,\n\t\t\tuserName)\n\t\tif err1 != nil {\n\t\t\tfmt.Println(err1)\n\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get orders\", \"internal server error\", c))\n\t\t\treturn\n\t\t}\n\t\tif qtyOrd > 0 {\n\t\t\terr2 := db.Select(&ords, `SELECT * FROM gaea.order WHERE user_name=$1`,\n\t\t\t\tuserName)\n\t\t\tif err2 != nil {\n\t\t\t\tfmt.Println(err2)\n\t\t\t\tc.AbortWithError(503, errors.NewAPIError(503, \"failed to get orders\", \"internal server error\", c))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar amtErr error\n\n\t\t\tfor _, order := range ords {\n\t\t\t\torder.ItemQty, order.AmountTotal, amtErr = CalcOrderTotals(order.OrderId, memberStatus, db)\n\t\t\t\tif amtErr != nil {\n\t\t\t\t\tfmt.Printf(\"%s\", amtErr)\n\t\t\t\t}\n\t\t\t\tretOrds = 
append(retOrds, order)\n\t\t\t}\n\t\t}\n\n\t\tc.JSON(200, gin.H{\"qty\": qtyOrd, \"orders\": retOrds})\n\t}\n}", "func GetAllOrders(context echo.Context) error {\n\tlimit, _ := strconv.ParseInt(context.QueryParam(\"limit\"), 10, 64)\n\tsize, _ := strconv.ParseInt(context.QueryParam(\"size\"), 10, 64)\n\n\torders, err := GetOrders(limit, size)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn util.SendError(context, \"failed\", \"something went wrong\", \"500\")\n\t}\n\treturn util.SendData(context, orders)\n}", "func GetOrders() ([]byte, error) {\n\tvar db, _ = sql.Open(\"sqlite3\", \"cache/users.sqlite3\")\n\tdefer db.Close()\n\tvar ou string\n\tvar ta, ts int64 \n\tq, err := db.Query(\"select ouid, chargedamount, timestamp from orders\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t\n\tvar a []interface{}\n\t\n\tfor q.Next() {\n\t\tq.Scan(&ou, &ta, &ts)\n\t\tb := make(map[string]interface{})\t\n\t\tb[\"ouid\"] = ou\n\t\tb[\"chargedamount\"] = float64(ta)/100\n\t\t// b[\"timestamp\"] = ts\n\t\tb[\"timestamp\"] = string(time.Unix(ts, 0).Format(\"02.01.2006 15:04:05\"))\n\t\ta = append(a, b)\n\t}\n\t\n\tgetord, err := json.Marshal(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\treturn getord, nil\n}", "func (a *App) getOrders(w http.ResponseWriter, r *http.Request) {\n\tpage, err := strconv.Atoi(r.URL.Query().Get(\"page\"))\n\tif err != nil {\n\t\thelpers.RespondWithError(w, http.StatusBadRequest, \"INVALID_PAGE_NUMBER\")\n\t\treturn\n\t}\n\n\tlimit, err := strconv.Atoi(r.URL.Query().Get(\"limit\"))\n\tif err != nil {\n\t\thelpers.RespondWithError(w, http.StatusBadRequest, \"INVALID_LIMIT_NUMBER\")\n\t\treturn\n\t}\n\n\tOrders, err := models.GetOrders(a.DB, (page - 1), limit)\n\tif err != nil {\n\t\thelpers.RespondWithError(w, http.StatusInternalServerError, \"DB_CONNECTION_ERR\")\n\t\treturn\n\t}\n\tif len(Orders) == 0 {\n\t\thelpers.RespondWithError(w, http.StatusInternalServerError, \"DATA_NOT_FOUND\")\n\t\treturn\n\t}\n\thelpers.RespondWithJSON(w, http.StatusOK, Orders)\n}", "func getOrders(shopCode string) error {\n\n\tmethods := []string{\"gy.erp.trade.history.get\", \"gy.erp.trade.get\"}\n\tpgSize, _ := strconv.Atoi(config.Config(\"PAGE_SIZE\"))\n\n\tif err := saveOrders(pgSize, shopCode, methods); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *ServerInterfaceWrapper) GetOrders(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(ApiKeyAuthScopes, []string{\"\"})\n\n\t// Parameter object where we will unmarshal all parameters from the context\n\tvar params GetOrdersParams\n\t// ------------- Optional query parameter \"symbol\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"symbol\", ctx.QueryParams(), &params.Symbol)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter symbol: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"from\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"from\", ctx.QueryParams(), &params.From)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter from: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"to\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"to\", ctx.QueryParams(), &params.To)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter to: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"status\" 
-------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"status\", ctx.QueryParams(), &params.Status)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter status: %s\", err))\n\t}\n\n\t// ------------- Optional query parameter \"limit\" -------------\n\n\terr = runtime.BindQueryParameter(\"form\", true, false, \"limit\", ctx.QueryParams(), &params.Limit)\n\tif err != nil {\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf(\"Invalid format for parameter limit: %s\", err))\n\t}\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetOrders(ctx, params)\n\treturn err\n}", "func (s *service) GetMarketSnapshot(ctx context.Context) (MarketSnapshot, error) {\n\tlogger := log.With(s.logger, \"method\", \"GetMarketSnapshot\")\n\tsnapshot := MarketSnapshot{}\n\tif Orders.IsEmpty() {\n\t\tlevel.Error(logger).Log(\"err\", ErrOrderBookIsEmpty)\n\t\treturn snapshot, ErrOrderBookIsEmpty\n\t}\n\n\tfor order := range Orders.IterBuffered() {\n\t\tval := reflect.ValueOf(order.Val)\n\n\t\tnew := MarketSnapshotItem{\n\t\t\tPrice: val.FieldByName(\"Price\").Float(),\n\t\t\tQuantity: val.FieldByName(\"Quantity\").Int(),\n\t\t}\n\t\tif val.FieldByName(\"Status\").String() == \"Active\" {\n\t\t\tif strings.ToUpper(val.FieldByName(\"Side\").String()) == \"ASK\" {\n\t\t\t\tsnapshot.Asks = append(snapshot.Asks, new)\n\t\t\t} else {\n\t\t\t\tsnapshot.Bids = append(snapshot.Bids, new)\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// sorting\n\tsnapshot.Sort()\n\n\tsnapshot.Spread = spread.getSpread()\n\treturn snapshot, nil\n}", "func (handler *UserHandler) GetOrders(res http.ResponseWriter, req *http.Request, userID uint) {\n\torders, err := handler.store.GetOrders(userID)\n\tif err != nil {\n\t\tsendError(res, err)\n\t}\n\tsendSuccess(res, orders, 1)\n}", "func GetOpenOrders() (orders []Order, error error) {\n\tjsonData, err := doTauRequest(1, \"GET\", \"trading/myopenorders/\", nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GetOpenOrders->%v\", err)\n\t}\n\tlog.Tracef(\"jsonData=%s\", string(jsonData))\n\tif err := json.Unmarshal(jsonData, &orders); err != nil {\n\t\treturn nil, fmt.Errorf(\"GetOpenOrders->%v\", err)\n\t}\n\treturn orders, nil\n}", "func (r Restorer) GetOrders(isBuy bool) (openedOrders []models.Order, err error) {\n\tcollection := r.Accesser.client.Database(r.Accesser.Database).Collection(r.Accesser.Table)\n\n\tfilter := bson.M{\"$and\": bson.A{\n\t\tbson.M{\"is_closed\": bson.M{\"$eq\": false}},\n\t\tbson.M{\"is_purchase\": bson.M{\"$eq\": isBuy}},\n\t}}\n\n\tcursor, err := collection.Find(context.Background(), filter)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dbOrders []models.DatabaseOrder\n\tif err = cursor.All(context.Background(), &dbOrders); err != nil {\n\t\treturn nil, err\n\t}\n\n\topenedOrders = make([]models.Order, len(dbOrders))\n\tfor i, o := range dbOrders {\n\t\topenedOrders[i] = *o.Convert()\n\t}\n\n\treturn\n}", "func (s *PurchaseOrdersEndpoint) Get(ctx context.Context, division int, id *types.GUID) (*PurchaseOrders, error) {\n\tb, _ := s.client.ResolvePathWithDivision(\"/api/v1/{division}/purchaseorder/PurchaseOrders\", division) // #nosec\n\tu, err := api.AddOdataKeyToURL(b, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := &PurchaseOrders{}\n\t_, _, requestError := s.client.NewRequestAndDo(ctx, \"GET\", u.String(), nil, e)\n\treturn e, requestError\n}", "func snapshotsGet(response http.ResponseWriter, request *http.Request) {\n\n\t//getting 
parameters (in our case unique id)\n\tquery := request.URL.Query()\n\tdocID := query.Get(\"document_id\")\n\tlimit := query.Get(\"limit\")\n\toffset := query.Get(\"offset\")\n\tif len(docID) == 0 {\n\t\tlim, offs := utils.GetLimitOffset(limit, offset)\n\t\tsnapshotsGetAll(response, request, lim, offs)\n\t} else {\n\t\tsnapshotsGetOne(response, request, docID)\n\t}\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddOrders can be used to add orders to Mesh. It validates the given orders and, if they are valid, stores them and eventually broadcasts them to peers.
func (app *App) AddOrders(signedOrdersRaw []*json.RawMessage) (*zeroex.ValidationResults, error) { allValidationResults := &zeroex.ValidationResults{ Accepted: []*zeroex.AcceptedOrderInfo{}, Rejected: []*zeroex.RejectedOrderInfo{}, } schemaValidOrders := []*zeroex.SignedOrder{} for _, signedOrderRaw := range signedOrdersRaw { signedOrderBytes := []byte(*signedOrderRaw) result, err := app.schemaValidateOrder(signedOrderBytes) if err != nil { signedOrder := &zeroex.SignedOrder{} if err := signedOrder.UnmarshalJSON(signedOrderBytes); err != nil { signedOrder = nil } log.WithField("signedOrderRaw", string(signedOrderBytes)).Info("Unexpected error while attempting to validate signedOrderJSON against schema") allValidationResults.Rejected = append(allValidationResults.Rejected, &zeroex.RejectedOrderInfo{ SignedOrder: signedOrder, Kind: MeshValidation, Status: zeroex.RejectedOrderStatus{ Code: ROInvalidSchemaCode, Message: "order did not pass JSON-schema validation: Malformed JSON or empty payload", }, }) continue } if !result.Valid() { log.WithField("signedOrderRaw", string(signedOrderBytes)).Info("Order failed schema validation") status := zeroex.RejectedOrderStatus{ Code: ROInvalidSchemaCode, Message: fmt.Sprintf("order did not pass JSON-schema validation: %s", result.Errors()), } signedOrder := &zeroex.SignedOrder{} if err := signedOrder.UnmarshalJSON(signedOrderBytes); err != nil { signedOrder = nil } allValidationResults.Rejected = append(allValidationResults.Rejected, &zeroex.RejectedOrderInfo{ SignedOrder: signedOrder, Kind: MeshValidation, Status: status, }) continue } signedOrder := &zeroex.SignedOrder{} if err := signedOrder.UnmarshalJSON(signedOrderBytes); err != nil { // This error should never happen since the signedOrder already passed the JSON schema validation above log.WithField("signedOrderRaw", string(signedOrderBytes)).Panic("Failed to unmarshal SignedOrder") } schemaValidOrders = append(schemaValidOrders, signedOrder) } validationResults, err := app.validateOrders(schemaValidOrders) if err != nil { return nil, err } for _, orderInfo := range validationResults.Accepted { allValidationResults.Accepted = append(allValidationResults.Accepted, orderInfo) } for _, orderInfo := range validationResults.Rejected { allValidationResults.Rejected = append(allValidationResults.Rejected, orderInfo) } for _, acceptedOrderInfo := range allValidationResults.Accepted { err = app.orderWatcher.Watch(acceptedOrderInfo) if err != nil { return nil, err } } return allValidationResults, nil }
[ "func (handler *rpcHandler) AddOrders(orders []*zeroex.SignedOrder) (*zeroex.ValidationResults, error) {\n\tlog.Debug(\"received AddOrders request via RPC\")\n\tvalidationResults, err := handler.app.AddOrders(orders)\n\tif err != nil {\n\t\t// We don't want to leak internal error details to the RPC client.\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in AddOrders RPC call\")\n\t\treturn nil, errInternal\n\t}\n\treturn validationResults, nil\n}", "func (handler *rpcHandler) AddOrders(signedOrdersRaw []*json.RawMessage, opts types.AddOrdersOpts) (results *ordervalidator.ValidationResults, err error) {\n\tlog.WithFields(log.Fields{\n\t\t\"count\": len(signedOrdersRaw),\n\t\t\"pinned\": opts.Pinned,\n\t}).Info(\"received AddOrders request via RPC\")\n\t// Catch panics, log stack trace and return RPC error message\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tinternalErr, ok := r.(error)\n\t\t\tif !ok {\n\t\t\t\t// If r is not of type error, convert it.\n\t\t\t\tinternalErr = fmt.Errorf(\"Recovered from non-error: (%T) %v\", r, r)\n\t\t\t}\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": internalErr,\n\t\t\t\t\"method\": \"AddOrders\",\n\t\t\t\t\"stackTrace\": string(debug.Stack()),\n\t\t\t}).Error(\"RPC method handler crashed\")\n\t\t\terr = errors.New(\"method handler crashed in AddOrders RPC call (check logs for stack trace)\")\n\t\t}\n\t}()\n\tvalidationResults, err := handler.app.AddOrders(handler.ctx, signedOrdersRaw, opts.Pinned)\n\tif err != nil {\n\t\t// We don't want to leak internal error details to the RPC client.\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in AddOrders RPC call\")\n\t\treturn nil, constants.ErrInternal\n\t}\n\treturn validationResults, nil\n}", "func (s Service) AddOrders(o OrdersCreateReq) error {\n\ttx, err := s.db.Begin()\n\tif err != nil {\n\t\tlog.Println(\"Failed to start a transaction\", err.Error())\n\t\treturn err\n\t}\n\n\torderTx, err := tx.Prepare(\"INSERT INTO api_db.order(id, email, total_price, total_weight_grams, order_number) VALUES(?,?,?,?,?)\")\n\tif err != nil {\n\t\tlog.Println(\"Failed to prepare the order transaction\", err.Error())\n\t\treturn err\n\t}\n\n\taddrTx, err := tx.Prepare(\"INSERT INTO api_db.order_shipping_address(order_id, first_name, address1, postcode) VALUES(?, ?, ?, ?)\")\n\tif err != nil {\n\t\tlog.Println(\"Failed to prepare the Address transaction\", err.Error())\n\t\treturn err\n\t}\n\n\tshippingLineTx, err := tx.Prepare(\"INSERT INTO api_db.order_to_shipping_line(order_id, shipping_line_id, title, price) VALUES(?, ?, ?, ?)\")\n\tif err != nil {\n\t\tlog.Println(\"Failed to prepare the Shipping Line transaction\", err.Error())\n\n\t\treturn err\n\t}\n\n\toLen := len(o)\n\tfor i := 0; i < oLen; i++ {\n\n\t\tcurrOrder := o[i]\n\t\tlog.Printf(\"Processing order #%d 📤\\n\", currOrder.ID)\n\t\t_, err = orderTx.Exec(\n\t\t\tcurrOrder.ID,\n\t\t\tcurrOrder.Email,\n\t\t\tcurrOrder.TotalPrice,\n\t\t\tcurrOrder.TotalWeightGrams,\n\t\t\tcurrOrder.OrderNumber,\n\t\t)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\tlog.Printf(\"Failed to save order #%d: %s\", currOrder.ID, err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tshippingAddr := currOrder.ShippingAddress\n\n\t\t// insert the shipping adddress\n\t\t_, err := addrTx.Exec(currOrder.ID, shippingAddr.FirstName, shippingAddr.Address1, shippingAddr.PostCode)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to save the shipping address\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tslLen := 
len(currOrder.ShippingLines)\n\t\tfor j := 0; j < slLen; j++ {\n\n\t\t\tsl := currOrder.ShippingLines[j]\n\t\t\t_, err := shippingLineTx.Exec(currOrder.ID, sl.ID, sl.Title, sl.Price)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Failed to save a shipping line\", err.Error())\n\t\t\t\ttx.Rollback()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\tlog.Println(\"Couldn't commit the transaction\")\n\t\treturn err\n\t}\n\n\tfor i := 0; i < oLen; i++ {\n\t\tcurrOrder := o[i]\n\t\tlog.Printf(\"Delivering order #%d\\n\", currOrder.ID)\n\t\ts.deliverOrderChan <- currOrder\n\t\tlog.Printf(\"Delivered order #%d\\n\", currOrder.ID)\n\t}\n\n\treturn nil\n}", "func (a *Client) AddCustomerOrders(params *AddCustomerOrdersParams, authInfo runtime.ClientAuthInfoWriter) (*AddCustomerOrdersOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddCustomerOrdersParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"addCustomerOrders\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/accounts/{koronaAccountId}/customerOrders\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &AddCustomerOrdersReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*AddCustomerOrdersOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for addCustomerOrders: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (lu *LocationUpdate) AddWorkOrders(w ...*WorkOrder) *LocationUpdate {\n\tids := make([]string, len(w))\n\tfor i := range w {\n\t\tids[i] = w[i].ID\n\t}\n\treturn lu.AddWorkOrderIDs(ids...)\n}", "func (luo *LocationUpdateOne) AddWorkOrders(w ...*WorkOrder) *LocationUpdateOne {\n\tids := make([]string, len(w))\n\tfor i := range w {\n\t\tids[i] = w[i].ID\n\t}\n\treturn luo.AddWorkOrderIDs(ids...)\n}", "func (b Bl3p) AddOrder(orderType string, orderAmount int, orderPrice int) (interface{}, error) {\n\n\tprice := strconv.FormatInt(int64(orderPrice), 10)\n\tamount := strconv.FormatInt(int64(orderAmount), 10)\n\n\tparams := map[string]string{\"type\": orderType, \"amount_int\": amount, \"price_int\": price, \"fee_currency\": \"BTC\"}\n\n\taddOrder, err := b.requester(\"BTCEUR/money/order/add\", params)\n\n\tresult := callModels.AddOrder{}\n\n\tif err == nil {\n\t\terr = json.Unmarshal(addOrder.Data, &result)\n\t}\n\n\treturn result, err\n}", "func (driver *influxdb) PutOrders(data []stockdb.Order, opt stockdb.Option) (resp response) {\n\tif err := driver.check(); err != nil {\n\t\tlog(logError, err)\n\t\tresp.Message = err.Error()\n\t\treturn\n\t}\n\tif opt.Market == \"\" {\n\t\topt.Market = defaultOption.Market\n\t}\n\tif opt.Symbol == \"\" {\n\t\topt.Symbol = defaultOption.Symbol\n\t}\n\tbp, err := driver.orders2BatchPoints(data, opt)\n\tif err != nil {\n\t\tlog(logError, err)\n\t\tresp.Message = err.Error()\n\t\treturn\n\t}\n\tif err := driver.client.Write(bp); err != nil {\n\t\tif strings.Contains(err.Error(), \"database not found\") {\n\t\t\tresp = driver.putMarket(opt.Market)\n\t\t\tif resp.Success {\n\t\t\t\treturn driver.PutOrders(data, opt)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tlog(logError, err)\n\t\tresp.Message = err.Error()\n\t\treturn\n\t}\n\tresp.Success = true\n\treturn\n}", "func (handler *UserHandler) AddOrder(res http.ResponseWriter, req *http.Request, userID uint) {\n\tdata, err := handler.parseOrder(req.Body)\n\tnewOrder, ok := data.(*orm.Order)\n\tif err != nil || !ok {\n\t\tsendError(res, errors.New(\"Invalid order\"))\n\t\treturn\n\t}\n\tcreatedOrder, err := handler.store.AddOrder(userID, *newOrder)\n\tif err != nil {\n\t\tsendError(res, err)\n\t}\n\tsendSuccess(res, []orm.Order{*createdOrder}, 1)\n}", "func (uu *UserUpdate) AddOrder(m ...*Merchant) *UserUpdate {\n\tids := make([]int, len(m))\n\tfor i := range m {\n\t\tids[i] = m[i].ID\n\t}\n\treturn uu.AddOrderIDs(ids...)\n}", "func (uuo *UserUpdateOne) AddOrder(m ...*Merchant) *UserUpdateOne {\n\tids := make([]int, len(m))\n\tfor i := range m {\n\t\tids[i] = m[i].ID\n\t}\n\treturn uuo.AddOrderIDs(ids...)\n}", "func (w *Watcher) ValidateAndStoreValidOrdersV4(ctx context.Context, orders []*zeroex.SignedOrderV4, chainID int, pinned bool, opts *types.AddOrdersOpts) (*ordervalidator.ValidationResults, error) {\n\tif len(orders) == 0 {\n\t\treturn &ordervalidator.ValidationResults{}, nil\n\t}\n\tresults, validMeshOrders, err := w.meshSpecificOrderValidationV4(orders, chainID, pinned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalidationBlock, zeroexResults, err := w.onchainOrderValidationV4(ctx, validMeshOrders)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresults.Accepted = append(results.Accepted, zeroexResults.Accepted...)\n\tresults.Rejected = append(results.Rejected, zeroexResults.Rejected...)\n\n\t// Filter out only the new orders.\n\tnewOrderInfos := []*ordervalidator.AcceptedOrderInfo{}\n\tfor _, acceptedOrderInfo := range 
results.Accepted {\n\t\t// If the order isn't new, we don't add to OrderWatcher.\n\t\tif acceptedOrderInfo.IsNew {\n\t\t\tnewOrderInfos = append(newOrderInfos, acceptedOrderInfo)\n\t\t}\n\t}\n\n\tif opts.KeepCancelled || opts.KeepExpired || opts.KeepFullyFilled || opts.KeepUnfunded {\n\t\tfor _, rejectedOrderInfo := range zeroexResults.Rejected {\n\t\t\t// NOTE(jalextowle): We can use the rejectedOrderInfo.Status\n\t\t\t// field to see whether or not the order is new or not. If\n\t\t\t// the order has already been stored, the rejectedOrderInfo.Status\n\t\t\t// field will be ordervalidator.ROOrderAlreadyStoredAndUnfillable.\n\t\t\t// If the rejection reason involves on-chain validation, then the\n\t\t\t// order is new.\n\t\t\tif (opts.KeepCancelled && rejectedOrderInfo.Status.Code == ordervalidator.ROCancelled.Code) ||\n\t\t\t\t(opts.KeepExpired && rejectedOrderInfo.Status.Code == ordervalidator.ROExpired.Code) ||\n\t\t\t\t(opts.KeepFullyFilled && rejectedOrderInfo.Status.Code == ordervalidator.ROFullyFilled.Code) ||\n\t\t\t\t(opts.KeepUnfunded && rejectedOrderInfo.Status.Code == ordervalidator.ROUnfunded.Code) {\n\t\t\t\tnewOrderInfos = append(newOrderInfos, &ordervalidator.AcceptedOrderInfo{\n\t\t\t\t\tOrderHash: rejectedOrderInfo.OrderHash,\n\t\t\t\t\tSignedOrder: rejectedOrderInfo.SignedOrder,\n\t\t\t\t\tSignedOrderV4: rejectedOrderInfo.SignedOrderV4,\n\t\t\t\t\t// TODO(jalextowle): Verify that this is consistent with the OrderWatcher\n\t\t\t\t\tFillableTakerAssetAmount: big.NewInt(0),\n\t\t\t\t\tIsNew: true,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\t// Add the order to the OrderWatcher. This also saves the order in the\n\t// database.\n\tallOrderEvents := []*zeroex.OrderEvent{}\n\torderEvents, err := w.add(newOrderInfos, validationBlock, pinned, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tallOrderEvents = append(allOrderEvents, orderEvents...)\n\n\tif len(allOrderEvents) > 0 {\n\t\t// NOTE(albrow): Send can block if the subscriber(s) are slow. Blocking here can cause problems when Mesh is\n\t\t// shutting down, so to prevent that, we call Send in a goroutine and return immediately if the context\n\t\t// is done.\n\t\tdone := make(chan interface{})\n\t\tgo func() {\n\t\t\tw.orderFeed.Send(allOrderEvents)\n\t\t\tdone <- struct{}{}\n\t\t}()\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn results, nil\n\t\tcase <-ctx.Done():\n\t\t\treturn results, nil\n\t\t}\n\t}\n\n\treturn results, nil\n}", "func (w *Watcher) Add(orderInfo *ordervalidator.AcceptedOrderInfo, pinned bool) error {\n\tif err := w.decreaseMaxExpirationTimeIfNeeded(); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO(albrow): technically we should count the current number of orders,\n\t// remove some if needed, and then insert the order in a single transaction to\n\t// ensure that we don't accidentally exceed the maximum. In practice, and\n\t// because of the way OrderWatcher works, the distinction shouldn't matter.\n\ttxn := w.meshDB.Orders.OpenTransaction()\n\tdefer func() {\n\t\t_ = txn.Discard()\n\t}()\n\n\t// Final expiration time check before inserting the order. We might have just\n\t// changed max expiration time above.\n\tif !pinned && orderInfo.SignedOrder.ExpirationTimeSeconds.Cmp(w.maxExpirationTime) == 1 {\n\t\t// HACK(albrow): This is technically not the ideal way to respond to this\n\t\t// situation, but it is a lot easier to implement for the time being. 
In the\n\t\t// future, we should return an error and then react to that error\n\t\t// differently depending on whether the order was received via RPC or from a\n\t\t// peer. In the former case, we should return an RPC error response\n\t\t// indicating that the order was not in fact added. In the latter case, we\n\t\t// should effectively no-op, neither penalizing the peer or emitting any\n\t\t// order events. For now, we respond by emitting an ADDED event immediately\n\t\t// followed by a STOPPED_WATCHING event. If this order was submitted via\n\t\t// RPC, the RPC client will see a response that indicates the order was\n\t\t// successfully added, and then it will look like we immediately stopped\n\t\t// watching it. This is not too far off from what really happened but is\n\t\t// slightly inefficient.\n\t\taddedEvent := &zeroex.OrderEvent{\n\t\t\tOrderHash: orderInfo.OrderHash,\n\t\t\tSignedOrder: orderInfo.SignedOrder,\n\t\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\t\tEndState: zeroex.ESOrderAdded,\n\t\t}\n\t\tstoppedWatchingEvent := &zeroex.OrderEvent{\n\t\t\tOrderHash: orderInfo.OrderHash,\n\t\t\tSignedOrder: orderInfo.SignedOrder,\n\t\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\t\tEndState: zeroex.ESStoppedWatching,\n\t\t}\n\t\tw.orderFeed.Send([]*zeroex.OrderEvent{addedEvent, stoppedWatchingEvent})\n\t\treturn nil\n\t}\n\n\torder := &meshdb.Order{\n\t\tHash: orderInfo.OrderHash,\n\t\tSignedOrder: orderInfo.SignedOrder,\n\t\tLastUpdated: time.Now().UTC(),\n\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\tIsRemoved: false,\n\t\tIsPinned: pinned,\n\t}\n\terr := txn.Insert(order)\n\tif err != nil {\n\t\tif _, ok := err.(db.AlreadyExistsError); ok {\n\t\t\t// If we're already watching the order, that's fine in this case. 
Don't\n\t\t\t// return an error.\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tif err := txn.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\terr = w.setupInMemoryOrderState(orderInfo.SignedOrder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\torderEvent := &zeroex.OrderEvent{\n\t\tOrderHash: orderInfo.OrderHash,\n\t\tSignedOrder: orderInfo.SignedOrder,\n\t\tFillableTakerAssetAmount: orderInfo.FillableTakerAssetAmount,\n\t\tEndState: zeroex.ESOrderAdded,\n\t}\n\tw.orderFeed.Send([]*zeroex.OrderEvent{orderEvent})\n\n\treturn nil\n}", "func (o *V3SetErrorOrderInput) HasOrders() bool {\n\tif o != nil && o.Orders != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (router SubsRouter) CreateOrders(c echo.Context) error {\n\tdefer router.logger.Sync()\n\tsugar := router.logger.Sugar()\n\n\tclaims := getAdminClaims(c)\n\n\tvar cart checkout.ShoppingCart\n\tif err := c.Bind(&cart); err != nil {\n\t\treturn render.NewBadRequest(err.Error())\n\t}\n\n\tschema := checkout.NewOrderSchemaBuilder(cart, claims).\n\t\tBuild()\n\terr := router.repo.CreateOrder(schema)\n\tif err != nil {\n\t\treturn render.NewDBError(err)\n\t}\n\n\tgo func() {\n\t\tprofile, err := router.repo.LoadB2BAdminProfile(claims.AdminID)\n\t\tif err != nil {\n\t\t\tsugar.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tparcel, err := letter.OrderCreatedParcel(profile, schema.OrderRow)\n\t\tif err != nil {\n\t\t\tsugar.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\terr = router.post.Deliver(parcel)\n\t\tif err != nil {\n\t\t\tsugar.Error(err)\n\t\t}\n\t}()\n\n\treturn c.JSON(http.StatusOK, schema.OrderRow)\n}", "func ValidateMsgNewOrders(ctx sdk.Context, k keeper.Keeper, msg types.MsgNewOrders) (*sdk.Result, error) {\n\tratio := \"1\"\n\tif len(msg.OrderItems) > 1 {\n\t\tratio = \"0.8\"\n\t}\n\n\tfor _, item := range msg.OrderItems {\n\t\tmsg := MsgNewOrder{\n\t\t\tSender: msg.Sender,\n\t\t\tProduct: item.Product,\n\t\t\tSide: item.Side,\n\t\t\tPrice: item.Price,\n\t\t\tQuantity: item.Quantity,\n\t\t}\n\t\terr := checkOrderNewMsg(ctx, k, msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif k.IsProductLocked(ctx, msg.Product) {\n\t\t\treturn types.ErrIsProductLocked(msg.Product).Result()\n\t\t}\n\n\t\torder := getOrderFromMsg(ctx, k, msg, ratio)\n\t\t_, err = k.TryPlaceOrder(ctx, order)\n\t\tif err != nil {\n\t\t\treturn common.ErrInsufficientCoins(DefaultParamspace, err.Error()).Result()\n\t\t}\n\t}\n\n\treturn &sdk.Result{}, nil\n\n}", "func updateOrders(orders *def.Orders, externalButtonPress def.Order, elevatorState def.ElevatorState) {\n\tif externalButtonPress.Direction == def.DIR_STOP {\n\t\t/*Detected internal button press*/\n\t\tdistributeInternalOrderToOrderList(externalButtonPress, orders, elevatorState)\n\t}\n\tif CheckForDuplicateOrder(orders, externalButtonPress.Floor) { // TODO: DO NOT REMOVE ORDERS ALONG THE SAME DIRECTION\n\t\tfindAndReplaceOrderIfSameDirection(orders, externalButtonPress, elevatorState.Direction) //TODO\n\t\treturn\n\t}\n\n\tif len(orders.Orders) > 0 { // For safety\n\t\t// Check to see if order should be placed first based on current elevator state\n\t\tif elevatorState.Direction == externalButtonPress.Direction && FloorIsInbetween(orders.Orders[0].Floor, externalButtonPress.Floor, elevatorState.LastFloor, elevatorState.Direction) {\n\t\t\t// Insert Order in first position\n\n\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\tcopy(orders.Orders[1:], orders.Orders[:])\n\t\t\torders.Orders[0] = externalButtonPress\n\t\t\treturn\n\t\t}\n\n\t}\n\n\tfor i := 1; i < len(orders.Orders); i++ 
{\n\t\tdirection := orders.Orders[i].Direction\n\t\tif externalButtonPress.Direction == direction { // Elevator is moving in the right direction\n\t\t\tswitch direction {\n\t\t\tcase def.DIR_UP:\n\t\t\t\tif externalButtonPress.Floor < orders.Orders[i].Floor {\n\t\t\t\t\t// Insert Order in position (i)\n\t\t\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\t\t\tcopy(orders.Orders[i+1:], orders.Orders[i:])\n\t\t\t\t\torders.Orders[i] = externalButtonPress\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase def.DIR_DOWN:\n\t\t\t\tif externalButtonPress.Floor > orders.Orders[i].Floor {\n\t\t\t\t\t// Insert Order in position (i+1)\n\n\t\t\t\t\torders.Orders = append(orders.Orders, def.Order{})\n\t\t\t\t\tcopy(orders.Orders[i+1:], orders.Orders[i:])\n\t\t\t\t\torders.Orders[i] = externalButtonPress\n\t\t\t\t\treturn\n\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\"Something weird is up, buddy\")\n\t\t\t}\n\t\t}\n\t}\n\t// Place order at back of orderList\n\torders.Orders = append(orders.Orders, externalButtonPress)\n}", "func (db *DatabaseService) AddOrder(order *models.Order) error {\n\t_, err := db.db.Model(order).Insert()\n\treturn err\n}", "func (a *API) AddOrder(o *Order) (r *OrderResponse, err error) {\n\n\terr = o.Error()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gowhmcs addorder error: %v\", err)\n\t\treturn\n\t}\n\n\tbody, err := a.Do(\"addorder\", o)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gowhmcs addorder error: %v (%s)\", err, string(body))\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(body, &r); err != nil {\n\t\terr = fmt.Errorf(\"gowhmcs addorder error: %s\", string(body))\n\t\treturn\n\t}\n\treturn\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
AddPeer can be used to manually connect to a new peer.
func (app *App) AddPeer(peerInfo peerstore.PeerInfo) error { ctx, cancel := context.WithTimeout(context.Background(), peerConnectTimeout) defer cancel() return app.node.Connect(ctx, peerInfo) }
[ "func (r *Reactor) AddPeer(peer p2p.Peer) {}", "func AddPeer(w http.ResponseWriter, r *http.Request) {\n\t// Connect to the peer\n\tvar newPeers peerStr\n\n\terr := json.NewDecoder(r.Body).Decode(&newPeers)\n\tif err != nil {\n\t\tlog.Println(\"AddPeer: could not decode peer\")\n\t}\n\tlog.Println(newPeers)\n\tlog.Printf(\"AddPeer: adding=%s\", newPeers.Peer)\n\n\tConnectToPeers(newPeers.Peer)\n}", "func (g *Gossiper) AddPeer(peerAddr *(net.UDPAddr)) {\n\tpeerConn, err := net.DialUDP(\"udp4\", nil, peerAddr)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: could not connect to given peer: %v\", err)\n\t\tos.Exit(-1)\n\t}\n\tg.peers.AddPeer(peerAddr.String(), peerConn)\n\n\tg.newNodes = append(g.newNodes, peerAddr.String())\n}", "func (*BaseReactor) AddPeer(peer *Peer) {}", "func (r *Relay) AddPeer(teidIn, teidOut uint32, raddr net.Addr) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tr.teidPair[teidIn] = &peer{teid: teidOut, addr: raddr}\n}", "func (c *Core) AddPeer(addr network.Address) error {\n\treturn c.server.AddPeer(addr)\n}", "func (gossiper *Gossiper) AddPeer(address string) {\n\tgossiper.peerMutex.Lock()\n\tgossiper.Peers = append(gossiper.Peers, address)\n\tgossiper.peerMutex.Unlock()\n\tgossiper.statusWaiting.Store(address, make(chan *messages.StatusPacket))\n\tgossiper.expected.Store(address, make(chan bool))\n}", "func (v *VXLAN) AddPeer(ip net.IP) error {\n\tlog.WithFields(log.Fields{\n\t\t\"interface\": v.name,\n\t\t\"ip\": ip,\n\t}).Trace(\"Adding peer to VXLAN\")\n\n\treturn netlink.NeighAppend(&netlink.Neigh{\n\t\tFamily: syscall.AF_BRIDGE,\n\t\tLinkIndex: v.vxlan.Index,\n\n\t\tState: netlink.NUD_PERMANENT,\n\t\tFlags: netlink.NTF_SELF,\n\t\tHardwareAddr: ZeroMAC,\n\t\tIP: ip,\n\t})\n}", "func (s *Service) AddPeer(fixSessionID quickfix.SessionID) *peer.Peer {\n\tp := peer.New(s.factory, fixSessionID, s.inbound)\n\ts.lock.Lock()\n\ts.peers[fixSessionID.String()] = p\n\ts.lock.Unlock()\n\treturn p\n}", "func (rs *ReactorShim) AddPeer(peer Peer) {\n\tpeerID, err := PeerIDFromString(string(peer.ID()))\n\tif err != nil {\n\t\trs.Logger.Error(\"failed to add peer\", \"peer\", peer.ID(), \"err\", err)\n\t\treturn\n\t}\n\n\tselect {\n\tcase rs.PeerUpdates.updatesCh <- PeerUpdate{PeerID: peerID, Status: PeerStatusUp}:\n\t\trs.Logger.Debug(\"sent peer update\", \"reactor\", rs.Name, \"peer\", peerID.String(), \"status\", PeerStatusUp)\n\n\tcase <-rs.PeerUpdates.Done():\n\t\t// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.\n\t\t// This is because there may be numerous spawned goroutines that are\n\t\t// attempting to send on the updateCh go channel and when the reactor stops\n\t\t// we do not want to preemptively close the channel as that could result in\n\t\t// panics sending on a closed channel. This also means that reactors MUST\n\t\t// be certain there are NO listeners on the updateCh channel when closing or\n\t\t// stopping.\n\t}\n}", "func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {\n\tp2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck\n\t\tChannelID: BlockchainChannel,\n\t\tMessage: &bcproto.StatusResponse{\n\t\t\tBase: bcR.store.Base(),\n\t\t\tHeight: bcR.store.Height(),\n\t\t},\n\t}, bcR.Logger)\n\t// it's OK if send fails. 
will try later in poolRoutine\n\n\t// peer is added to the pool once we receive the first\n\t// bcStatusResponseMessage from the peer and call pool.updatePeer()\n}", "func (g *Gateway) addPeer(p *peer) {\n\tg.peers[p.addr] = p\n\tg.addNode(p.addr)\n\tgo g.listenPeer(p)\n}", "func (c *Cluster) AddPeer(regionID, storeID, peerID uint64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.regions[regionID].addPeer(peerID, storeID)\n}", "func (table *RouteTable) AddPeer(pid peer.ID, addr ma.Multiaddr) {\n\t// logging.VLog().Debugf(\"Adding Peer: %s,%s\", pid.Pretty(), addr.String())\n\ttable.peerStore.AddAddr(pid, addr, peerstore.PermanentAddrTTL)\n\ttable.routeTable.Update(pid)\n\ttable.onRouteTableChange()\n\n}", "func (epR *EventpoolReactor) AddPeer(peer p2p.Peer) {\n\tepR.ids.ReserveForPeer(peer)\n\tgo epR.broadcastEventsRoutine(peer)\n}", "func (s *server) addPeer(p *peer) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\t// Ignore new peers if we're shutting down.\n\tif atomic.LoadInt32(&s.shutdown) != 0 {\n\t\tp.Disconnect()\n\t\treturn\n\t}\n\n\t// Track the new peer in our indexes so we can quickly look it up either\n\t// according to its public key, or it's peer ID.\n\t// TODO(roasbeef): pipe all requests through to the\n\t// queryHandler/peerManager\n\ts.peersMtx.Lock()\n\n\tpubStr := string(p.addr.IdentityKey.SerializeCompressed())\n\n\ts.peersByID[p.id] = p\n\ts.peersByPub[pubStr] = p\n\n\tif p.inbound {\n\t\ts.inboundPeers[pubStr] = p\n\t} else {\n\t\ts.outboundPeers[pubStr] = p\n\t}\n\n\ts.peersMtx.Unlock()\n\n\t// Launch a goroutine to watch for the termination of this peer so we\n\t// can ensure all resources are properly cleaned up and if need be\n\t// connections are re-established.\n\tgo s.peerTerminationWatcher(p)\n\n\t// Once the peer has been added to our indexes, send a message to the\n\t// channel router so we can synchronize our view of the channel graph\n\t// with this new peer.\n\tgo s.discoverSrv.SynchronizeNode(p.addr.IdentityKey)\n}", "func (handler *rpcHandler) AddPeer(peerInfo peerstore.PeerInfo) error {\n\tlog.Debug(\"received AddPeer request via RPC\")\n\tif err := handler.app.AddPeer(peerInfo); err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in AddPeer RPC call\")\n\t\treturn errInternal\n\t}\n\treturn nil\n}", "func (r *room) addPeer(p *peer) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.peers[p.uid] = p\n}", "func (r *room) addPeer(p *Peer) {\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.peers[p.uid] = p\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SubscribeToOrderEvents lets one subscribe to order events emitted by the OrderWatcher
func (app *App) SubscribeToOrderEvents(sink chan<- []*zeroex.OrderEvent) event.Subscription { subscription := app.orderWatcher.Subscribe(sink) return subscription }
[ "func (handler *rpcHandler) SubscribeToOrders(ctx context.Context) (*ethRpc.Subscription, error) {\n\tlog.Debug(\"received order event subscription request via RPC\")\n\tsubscription, err := SetupOrderStream(ctx, handler.app)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in `mesh_subscribe` to `orders` RPC call\")\n\t\treturn nil, errInternal\n\t}\n\treturn subscription, nil\n}", "func (_EtherDelta *EtherDeltaFilterer) WatchOrder(opts *bind.WatchOpts, sink chan<- *EtherDeltaOrder) (event.Subscription, error) {\n\n\tlogs, sub, err := _EtherDelta.contract.WatchLogs(opts, \"Order\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(EtherDeltaOrder)\n\t\t\t\tif err := _EtherDelta.contract.UnpackLog(event, \"Order\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (w *Watcher) Subscribe(sink chan<- []*zeroex.OrderEvent) event.Subscription {\n\treturn w.orderScope.Track(w.orderFeed.Subscribe(sink))\n}", "func (handler *rpcHandler) SubscribeToOrders(ctx context.Context) (result *ethrpc.Subscription, err error) {\n\tlog.Debug(\"received order event subscription request via RPC\")\n\t// Catch panics, log stack trace and return RPC error message\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tinternalErr, ok := r.(error)\n\t\t\tif !ok {\n\t\t\t\t// If r is not of type error, convert it.\n\t\t\t\tinternalErr = fmt.Errorf(\"Recovered from non-error: (%T) %v\", r, r)\n\t\t\t}\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": internalErr,\n\t\t\t\t\"method\": \"SubscribeToOrders\",\n\t\t\t\t\"stackTrace\": string(debug.Stack()),\n\t\t\t}).Error(\"RPC method handler crashed\")\n\t\t\terr = errors.New(\"method handler crashed in SubscribeToOrders RPC call (check logs for stack trace)\")\n\t\t}\n\t}()\n\tsubscription, err := SetupOrderStream(ctx, handler.app)\n\tif err != nil {\n\t\tlog.WithField(\"error\", err.Error()).Error(\"internal error in `mesh_subscribe` to `orders` RPC call\")\n\t\treturn nil, constants.ErrInternal\n\t}\n\treturn subscription, nil\n}", "func (esaclient *ESAClient) OrderSubscribe(osm OrderSubscriptionMessage) (StatusMessage, error) {\n\treplyChan := make(chan ResponseMessage)\n\treqMsg := RequestMessage{Op: \"orderSubscription\", OrderSubscriptionMessage: &osm}\n\tesaclient.reqMsgChan <- WorkUnit{req: reqMsg, respChan: replyChan}\n\n\tselect {\n\tcase resp := <-replyChan:\n\t\treturn *resp.StatusMessage, nil\n\tcase <-time.After(3 * time.Second):\n\t\treturn StatusMessage{}, errors.New(\"timeout before getting response\")\n\t}\n}", "func (s *OrderService) PubSubOrderReceiver(event *pubsub.CloudEvent) error {\n\t// This JSON nonsense is an \"easy\" way to convert\n\t// The event.Data which is a map back into a real Order\n\tjsonData, err := json.Marshal(event.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar order spec.Order\n\tif err := json.Unmarshal(jsonData, &order); err != nil {\n\t\treturn err\n\t}\n\n\t// Now we have a real order, we can process it\n\tif err := 
s.ProcessOrder(order); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func SetupOrderStream(ctx context.Context, app *core.App) (*ethrpc.Subscription, error) {\n\tnotifier, supported := ethrpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &ethrpc.Subscription{}, ethrpc.ErrNotificationsUnsupported\n\t}\n\n\trpcSub := notifier.CreateSubscription()\n\n\tgo func() {\n\t\torderEventsChan := make(chan []*zeroex.OrderEvent, orderEventsBufferSize)\n\t\torderWatcherSub := app.SubscribeToOrderEvents(orderEventsChan)\n\t\tdefer orderWatcherSub.Unsubscribe()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase orderEvents := <-orderEventsChan:\n\t\t\t\terr := notifier.Notify(rpcSub.ID, orderEvents)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// TODO(fabio): The current implementation of `notifier.Notify` returns a\n\t\t\t\t\t// `write: broken pipe` error when it is called _after_ the client has\n\t\t\t\t\t// disconnected but before the corresponding error is received on the\n\t\t\t\t\t// `rpcSub.Err()` channel. This race-condition is not problematic beyond\n\t\t\t\t\t// the unnecessary computation and log spam resulting from it. Once this is\n\t\t\t\t\t// fixed upstream, give all logs an `Error` severity.\n\t\t\t\t\tlogEntry := log.WithFields(map[string]interface{}{\n\t\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t\t\t\"subscriptionType\": \"orders\",\n\t\t\t\t\t\t\"orderEvents\": len(orderEvents),\n\t\t\t\t\t})\n\t\t\t\t\tmessage := \"error while calling notifier.Notify\"\n\t\t\t\t\t// If the network connection disconnects for longer then ~2mins and then comes\n\t\t\t\t\t// back up, we've noticed the call to `notifier.Notify` return `i/o timeout`\n\t\t\t\t\t// `net.OpError` errors everytime it's called and no values are sent over\n\t\t\t\t\t// `rpcSub.Err()` nor `notifier.Closed()`. 
In order to stop the error from\n\t\t\t\t\t// endlessly re-occuring, we unsubscribe and return for encountering this type of\n\t\t\t\t\t// error.\n\t\t\t\t\tif _, ok := err.(*net.OpError); ok {\n\t\t\t\t\t\tlogEntry.Trace(message)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif strings.Contains(err.Error(), \"write: broken pipe\") {\n\t\t\t\t\t\tlogEntry.Trace(message)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogEntry.Error(message)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase err := <-rpcSub.Err():\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithField(\"err\", err).Error(\"rpcSub returned an error\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debug(\"rpcSub was closed without error\")\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}", "func (_Broker *BrokerFilterer) WatchCancelOrder(opts *bind.WatchOpts, sink chan<- *BrokerCancelOrder) (event.Subscription, error) {\n\n\tlogs, sub, err := _Broker.contract.WatchLogs(opts, \"CancelOrder\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(BrokerCancelOrder)\n\t\t\t\tif err := _Broker.contract.UnpackLog(event, \"CancelOrder\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_m *OrderBookService) SubscribeOrderBook(c *ws.Client, bt string, qt string) {\n\t_m.Called(c, bt, qt)\n}", "func SubscribeToExchangeOrderbooks(exchange string) (dispatch.Pipe, error) {\n\tservice.mu.Lock()\n\tdefer service.mu.Unlock()\n\texch, ok := service.books[strings.ToLower(exchange)]\n\tif !ok {\n\t\treturn dispatch.Pipe{}, fmt.Errorf(\"%w for %s exchange\",\n\t\t\terrCannotFindOrderbook, exchange)\n\t}\n\treturn service.Mux.Subscribe(exch.ID)\n}", "func (bf *WebSocketClient) SubscribeChildOrder() {\n\tbf.subscribe(channelChildOrder)\n}", "func SetupOrderStream(ctx context.Context, app *core.App) (*ethRpc.Subscription, error) {\n\tnotifier, supported := ethRpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &ethRpc.Subscription{}, ethRpc.ErrNotificationsUnsupported\n\t}\n\n\trpcSub := notifier.CreateSubscription()\n\n\tgo func() {\n\t\torderEventsChan := make(chan []*zeroex.OrderEvent)\n\t\torderWatcherSub := app.SubscribeToOrderEvents(orderEventsChan)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase orderEvents := <-orderEventsChan:\n\t\t\t\terr := notifier.Notify(rpcSub.ID, orderEvents)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.WithField(\"error\", err.Error()).Error(\"error while calling notifier.Notify\")\n\t\t\t\t}\n\t\t\tcase <-rpcSub.Err():\n\t\t\t\torderWatcherSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed():\n\t\t\t\torderWatcherSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}", "func (_Broker *BrokerFilterer) WatchFillOrder(opts *bind.WatchOpts, sink chan<- *BrokerFillOrder) (event.Subscription, error) {\n\n\tlogs, sub, err := _Broker.contract.WatchLogs(opts, \"FillOrder\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer 
sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(BrokerFillOrder)\n\t\t\t\tif err := _Broker.contract.UnpackLog(event, \"FillOrder\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_WyvernExchange *WyvernExchangeFilterer) WatchOrdersMatched(opts *bind.WatchOpts, sink chan<- *WyvernExchangeOrdersMatched, maker []common.Address, taker []common.Address, metadata [][32]byte) (event.Subscription, error) {\n\n\tvar makerRule []interface{}\n\tfor _, makerItem := range maker {\n\t\tmakerRule = append(makerRule, makerItem)\n\t}\n\tvar takerRule []interface{}\n\tfor _, takerItem := range taker {\n\t\ttakerRule = append(takerRule, takerItem)\n\t}\n\n\tvar metadataRule []interface{}\n\tfor _, metadataItem := range metadata {\n\t\tmetadataRule = append(metadataRule, metadataItem)\n\t}\n\n\tlogs, sub, err := _WyvernExchange.contract.WatchLogs(opts, \"OrdersMatched\", makerRule, takerRule, metadataRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(WyvernExchangeOrdersMatched)\n\t\t\t\tif err := _WyvernExchange.contract.UnpackLog(event, \"OrdersMatched\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func onOrderUpdate(order kiteconnect.Order) {\n\tfmt.Printf(\"Order: %+v\", order.OrderID)\n}", "func ControlOrders(ch c.Channels) {\n\tnewOrders := make(chan msgs.OrderMsg, 1000)\n\tgo handleNewOrder(newOrders, ch)\n\tgo listenForNewOrders(newOrders, ch)\n\tgo checkForAcceptedOrders(newOrders, ch)\n\tfor {\n\t\tselect {\n\t\tcase newOrder := <-ch.DelegateOrder:\n\t\t\torderMsg := msgs.OrderMsg{Order: newOrder}\n\t\t\torderMsg.Id = (<-ch.MetaData).Id\n\t\t\tdelegateOrder(orderMsg, ch)\n\t\t\tnewOrders <- orderMsg\n\t\tcase orderCompleted := <-ch.CompletedOrder: // the external order has been taken\n\t\t\torderTensorDiffMsg := msgs.OrderTensorDiffMsg{\n\t\t\t\tOrder: orderCompleted,\n\t\t\t\tDiff: msgs.DIFF_REMOVE,\n\t\t\t\tId: (<-ch.MetaData).Id}\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\torderTensorDiffMsg.Send()\n\t\t\t\ttime.Sleep(1 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}", "func (_WyvernExchange *WyvernExchangeFilterer) WatchOrderCancelled(opts *bind.WatchOpts, sink chan<- *WyvernExchangeOrderCancelled, hash [][32]byte) (event.Subscription, error) {\n\n\tvar hashRule []interface{}\n\tfor _, hashItem := range hash {\n\t\thashRule = append(hashRule, hashItem)\n\t}\n\n\tlogs, sub, err := _WyvernExchange.contract.WatchLogs(opts, \"OrderCancelled\", hashRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase 
log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(WyvernExchangeOrderCancelled)\n\t\t\t\tif err := _WyvernExchange.contract.UnpackLog(event, \"OrderCancelled\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (room *RoomRecorder) eventsSubscribe(e EventsI) {\n\tobserver := synced.NewObserver(\n\t\tsynced.NewPairNoArgs(room_.StatusFinished, room.finish))\n\te.Observe(observer.AddPublisherCode(room_.UpdateStatus))\n}", "func (c *Connection) SubscribeLendingOrders(fn func(*Message) error) error {\n\tch := c.GetChannel(\"lendingOrderSubscribe\")\n\tif ch == nil {\n\t\treturn errors.New(\"Fail to open lendingorderSubscribe chanel\")\n\t}\n\tq := c.GetQueue(ch, \"lending_order\")\n\tif q == nil {\n\t\treturn errors.New(\"Fail to open lending order queue\")\n\t}\n\tgo func() {\n\t\tmsgs, err := c.Consume(ch, q)\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t}\n\n\t\tforever := make(chan bool)\n\n\t\tgo func() {\n\t\t\tfor d := range msgs {\n\t\t\t\tmsg := &Message{}\n\t\t\t\terr := json.Unmarshal(d.Body, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tgo fn(msg)\n\t\t\t}\n\t\t}()\n\n\t\t<-forever\n\t}()\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ServeHTTP populates the status page template with data and serves it when there is a request.
func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if s.Authenticator != nil { _, err := s.Authenticator.Authenticate(r.Context(), w, r) if errors.Is(err, oidc.ErrRedirectRequired) { return } if err != nil { http.Error(w, "Error: Authentication failed", http.StatusInternalServerError) log.Logger("webserver").Error("Authentication failed", "error", err, "time", s.Clock.Now().String()) return } } log.Logger("webserver").Info("Applier status request", "time", s.Clock.Now().String()) if s.Template == nil { http.Error(w, "Error: Unable to load HTML template", http.StatusInternalServerError) log.Logger("webserver").Error("Request failed", "error", "No template found", "time", s.Clock.Now().String()) return } ctx, cancel := context.WithTimeout(context.Background(), s.Timeout) defer cancel() waybills, err := s.KubeClient.ListWaybills(ctx) if err != nil { http.Error(w, fmt.Sprintf("Error: Unable to list Waybill resources: %v", err), http.StatusInternalServerError) log.Logger("webserver").Error("Unable to list Waybill resources", "error", err, "time", s.Clock.Now().String()) return } events, err := s.KubeClient.ListWaybillEvents(ctx) if err != nil { http.Error(w, fmt.Sprintf("Error: Unable to list Waybill events: %v", err), http.StatusInternalServerError) log.Logger("webserver").Error("Unable to list Waybill events", "error", err, "time", s.Clock.Now().String()) return } result := GetNamespaces(waybills, events, s.DiffURLFormat) rendered := &bytes.Buffer{} if err := s.Template.ExecuteTemplate(rendered, "index", result); err != nil { http.Error(w, "Error: Unable to render HTML template", http.StatusInternalServerError) log.Logger("webserver").Error("Request failed", "error", http.StatusInternalServerError, "time", s.Clock.Now().String(), "err", err) return } w.WriteHeader(http.StatusOK) if _, err := rendered.WriteTo(w); err != nil { log.Logger("webserver").Error("Request failed", "error", http.StatusInternalServerError, "time", s.Clock.Now().String(), "err", err) } log.Logger("webserver").Info("Request completed successfully", "time", s.Clock.Now().String()) }
[ "func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Applier status request\", \"time\", s.Clock.Now().String())\n\tif s.Template == nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", \"No template found\", \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tif err := s.Template.Execute(w, s.Data); err != nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", http.StatusInternalServerError, \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tlog.Logger.Info(\"Request completed successfully\", \"time\", s.Clock.Now().String())\n}", "func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tinfo := c.Status()\n\tstatusTemplate.Execute(rw, info)\n}", "func (f *ServerStatus) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tprocessorAvailable := cli.Available\n\n\tr := render.New(render.Options{\n\t\tIndentJSON: true,\n\t})\n\n\tvar code int\n\n\tif ShuttingDown {\n\t\tstatus.Message = \"Shutting down\"\n\t\tcode = 501\n\t} else if processorAvailable {\n\t\tstatus.Message = \"OK\"\n\t\tcode = 200\n\t} else {\n\t\tstatus.Message = \"There is no processor available. Make sure you have image magick installed.\"\n\t\tcode = 501\n\t}\n\n\tr.JSON(w, code, status)\n}", "func serveHealthStatus(w http.ResponseWriter, req *http.Request) {\n\tfmt.Fprintf(w, \"OK\")\n}", "func (t *TemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// once keeps track of which of these anonymous functions have already been called,\n\t// and stores their result. If they are called again it just returns the stored result.\n\t// t.once.Do(func(){\n\tt.templ = template.Must(template.ParseFiles(filepath.Join(templateDir, t.filename)))\n\t// // })\n\n\terr := t.templ.Execute(w, t.data)\n\tif err != nil {\n\t\tlog.Println(\"Error trying to render page: \", t.filename, err)\n\t}\n}", "func (exp *attackcost) StatusPage(w http.ResponseWriter, r *http.Request, code, message, additionalInfo string, sType web.ExpStatus) {\n\tcommonPageData := exp.commonData(r)\n\tif commonPageData == nil {\n\t\t// exp.blockData.GetTip likely failed due to empty DB.\n\t\thttp.Error(w, \"The database is initializing. 
Try again later.\",\n\t\t\thttp.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tstr, err := exp.templates.Exec(\"status\", struct {\n\t\t*web.CommonPageData\n\t\tStatusType web.ExpStatus\n\t\tCode string\n\t\tMessage string\n\t\tAdditionalInfo string\n\t}{\n\t\tCommonPageData: commonPageData,\n\t\tStatusType: sType,\n\t\tCode: code,\n\t\tMessage: message,\n\t\tAdditionalInfo: additionalInfo,\n\t})\n\tif err != nil {\n\t\tlog.Errorf(\"Template execute failure: %v\", err)\n\t\tstr = \"Something went very wrong if you can see this, try refreshing\"\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tswitch sType {\n\tcase web.ExpStatusDBTimeout:\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\tcase web.ExpStatusNotFound:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase web.ExpStatusFutureBlock:\n\t\tw.WriteHeader(http.StatusOK)\n\tcase web.ExpStatusError:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t// When blockchain sync is running, status 202 is used to imply that the\n\t// other requests apart from serving the status sync page have been received\n\t// and accepted but cannot be processed now till the sync is complete.\n\tcase web.ExpStatusSyncing:\n\t\tw.WriteHeader(http.StatusAccepted)\n\tcase web.ExpStatusNotSupported:\n\t\tw.WriteHeader(http.StatusUnprocessableEntity)\n\tcase web.ExpStatusBadRequest:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t}\n\tio.WriteString(w, str)\n}", "func (this *TemplateHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\t// Get renderer or return NOT IMPLEMENTED error\n\trenderer := this.RenderCache.Get(this.docroot, req)\n\tif renderer == nil {\n\t\tthis.ServeError(w, Error(req, http.StatusNotImplemented))\n\t\treturn\n\t}\n\n\tthis.Debugf(\"ServeHTTP: req=%v renderer=%v\", req.URL, renderer)\n\n\t// Check for If-Modified-Since header on content\n\tif ifmodified := req.Header.Get(\"If-Modified-Since\"); ifmodified != \"\" {\n\t\tif date, err := time.Parse(http.TimeFormat, ifmodified); err == nil {\n\t\t\tif renderer.IsModifiedSince(this.docroot, req, date) == false {\n\t\t\t\tthis.Debugf(\" If-Modified-Since %v: Returning %v\", ifmodified, http.StatusNotModified)\n\t\t\t\tthis.ServeError(w, Error(req, http.StatusNotModified))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Render Content\n\tctx, err := renderer.ServeContent(this.docroot, req)\n\tif err != nil {\n\t\tthis.ServeError(w, err)\n\t\treturn\n\t} else if ctx.Content == nil {\n\t\tthis.ServeError(w, Error(req, http.StatusNoContent))\n\t\treturn\n\t}\n\n\t// Get template and template modification time\n\tvar tmpl *template.Template\n\tvar modtime time.Time\n\tif ctx.Template != \"\" {\n\t\tif tmpl, modtime, err = this.TemplateCache.Get(ctx.Template); err != nil {\n\t\t\tthis.Debugf(\" Template %q: Error: %v\", ctx.Template, err)\n\t\t\tthis.ServeError(w, Error(req, http.StatusNotFound, err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Update modification time for page if the template is later\n\tif ctx.Modified.IsZero() == false && modtime.After(ctx.Modified) {\n\t\tthis.Debugf(\" Template updates modification time: %v\", modtime)\n\t\tctx.Modified = modtime\n\t}\n\n\t// Set default type\n\tif ctx.Type == \"\" {\n\t\tctx.Type = \"application/octet-stream\"\n\t}\n\n\t// Set headers\n\tw.Header().Set(\"Content-Type\", ctx.Type)\n\tif ctx.Modified.IsZero() == false {\n\t\tw.Header().Set(\"Last-Modified\", ctx.Modified.Format(http.TimeFormat))\n\t} else {\n\t\tw.Header().Set(\"Cache-Control\", 
\"no-cache\")\n\t}\n\n\t// If no template then we expect the content to be []byte\n\tif tmpl == nil {\n\t\tif data, ok := ctx.Content.([]byte); ok {\n\t\t\tw.Header().Set(\"Content-Length\", fmt.Sprint(len(data)))\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tif req.Method != http.MethodHead {\n\t\t\t\tw.Write(data)\n\t\t\t}\n\t\t\treturn\n\t\t} else {\n\t\t\tthis.ServeError(w, Error(req, http.StatusInternalServerError))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Debugging\n\t/*\n\t\tif this.Logger.IsDebug() {\n\t\t\tif json, err := json.MarshalIndent(ctx.Content, \" \", \" \"); err == nil {\n\t\t\t\tthis.Debugf(string(json))\n\t\t\t}\n\t\t}\n\t*/\n\n\t// Execute through a template\n\tdata := new(bytes.Buffer)\n\tif err := tmpl.Execute(data, ctx.Content); err != nil {\n\t\tthis.ServeError(w, Error(req, http.StatusInternalServerError, err.Error()))\n\t\treturn\n\t}\n\n\t// Set content length and write data\n\tw.Header().Set(\"Content-Length\", fmt.Sprint(data.Len()))\n\tw.WriteHeader(http.StatusOK)\n\tif req.Method != http.MethodHead {\n\t\tw.Write(data.Bytes())\n\t}\n}", "func (i indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"You are all my minions, %v, beware %v, %v!\\n\", r.RemoteAddr, r.Method, r.URL)\n\tif r.URL.Path != \"/\" {\n\t\tlog.Printf(\"Sirree, this is a wrong URL path: %v!\\n\", r.URL.Path)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, i.pageNotFound)\n\t\treturn\n\t}\n\tif r.Method != \"GET\" {\n\t\tlog.Printf(\"Madam, the method thou art using is wrong: %v!\\n\", r.Method)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, i.pageBadRequest)\n\t\treturn\n\t}\n\tdata := pageData{\n\t\tTitle: \"Welcome\",\n\t\tVersion: fmt.Sprintf(\"This is version %v\", i.version),\n\t}\n\tif err := i.tmpl.Execute(w, data); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (t *staticTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif err := t.templ.Execute(w, t.data); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (t *templateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"templateHandler: ServeHTTP called\")\n\tt.once.Do(func() {\n\t\tt.templ = template.Must(template.ParseFiles(filepath.Join(\"templates\", t.filename)))\n\t})\n\tt.templ.Execute(w, r)\n}", "func (t *Timer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer t.UpdateSince(time.Now())\n\tt.handler.ServeHTTP(w, r)\n}", "func (s *SimpleServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tAddIPToContext(r)\n\n\t// only count non-LB requests\n\tif r.URL.Path != s.cfg.HealthCheckPath {\n\t\ts.monitor.CountRequest()\n\t\tdefer s.monitor.UncountRequest()\n\t}\n\n\ts.safelyExecuteRequest(w, r)\n}", "func (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) {\n\th.Logger.Info(\"WARNING: /status has been deprecated. 
Use /ping instead.\")\n\tatomic.AddInt64(&h.stats.StatusRequests, 1)\n\th.writeHeader(w, http.StatusNoContent)\n}", "func (h TestServerHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(h.StatusCode)\n\twriter.Header().Add(\"Content-Type\", \"text/plain\")\n\t_, _ = writer.Write([]byte(h.Content))\n}", "func serveHTTP(ctlConf config, h healthcheck.Handler) func() {\n\t// we don't need a health checker, we already have a http endpoint that returns 200\n\tmux := http.NewServeMux()\n\tsrv := &http.Server{\n\t\tAddr: \":8080\",\n\t\tHandler: mux,\n\t}\n\n\t// add health check as well\n\tmux.HandleFunc(\"/live\", h.LiveEndpoint)\n\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif _, err := w.Write([]byte(ctlConf.HTTPResponse)); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlogger.WithError(err).Error(\"Error responding to http request\")\n\t\t}\n\t})\n\n\tgo func() {\n\t\tlogger.Info(\"Starting HTTP Server...\")\n\t\tif err := srv.ListenAndServe(); err != http.ErrServerClosed {\n\t\t\tlogger.WithError(err).Fatal(\"Could not start HTTP server\")\n\t\t}\n\t}()\n\n\treturn func() {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tdefer cancel()\n\n\t\tif err := srv.Shutdown(ctx); err != nil {\n\t\t\tlogger.WithError(err).Fatal(\"Could not shut down HTTP server\")\n\t\t}\n\t\tlogger.Info(\"HTTP server was gracefully shut down\")\n\t}\n}", "func (Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {}", "func (s *staticGCSServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.RequestURI == \"/healthz\" {\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\tsklog.Infof(\"Request URI %s\", r.RequestURI)\n\t// path.Clean prevents directory traversal attacks, as it re-writes /../../foo/bar into\n\t// /foo/bar, which prevents walking into parent directories.\n\tfileName := strings.TrimPrefix(path.Clean(r.RequestURI), \"/\")\n\n\tvar gcsFilePath string\n\ts.mutex.RLock()\n\tif fileName == \"\" {\n\t\tgcsFilePath = s.pathToServe + \"/index.html\"\n\t} else {\n\t\tgcsFilePath = s.pathToServe + \"/\" + fileName\n\t}\n\ts.mutex.RUnlock()\n\n\tif strings.HasSuffix(gcsFilePath, \".html\") {\n\t\tmetrics2.GetCounter(\"static_server_html_page_requests\", map[string]string{\n\t\t\t\"bucket\": s.bucket,\n\t\t\t\"request_uri\": r.RequestURI,\n\t\t}).Inc(1)\n\t}\n\n\tctx, cancel := context.WithTimeout(r.Context(), time.Minute)\n\tdefer cancel()\n\tlatestReader, err := s.client.Bucket(s.bucket).Object(gcsFilePath).NewReader(ctx)\n\tif err != nil {\n\t\thttputils.ReportError(w, skerr.Wrapf(err, \"file %s\", gcsFilePath), \"Could not resolve file\", http.StatusNotFound)\n\t\treturn\n\t}\n\txb, err := io.ReadAll(latestReader)\n\tif err != nil {\n\t\thttputils.ReportError(w, skerr.Wrapf(err, \"file %s\", gcsFilePath), \"Could not read file\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\t_ = latestReader.Close()\n\n\tif strings.HasSuffix(gcsFilePath, \".js\") {\n\t\tw.Header().Set(\"Content-Type\", \"application/javascript\")\n\t} else if strings.HasSuffix(gcsFilePath, \".css\") {\n\t\tw.Header().Set(\"Content-Type\", \"text/css\")\n\t} else if strings.HasSuffix(gcsFilePath, \".html\") {\n\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t} else {\n\t\t// Just to be safe, assume everything with an unknown extension is plain text.\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t}\n\t_, err = w.Write(xb)\n\tif err != nil {\n\t\tsklog.Warningf(\"Error 
while writing response for file %s: %s\", gcsFilePath, err)\n\t}\n}", "func (t Telemetry) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tt.rCount.Mark(1)\n\tsw := MakeLogger(w)\n\n\tstart := time.Now()\n\tt.inner.ServeHTTP(sw, r)\n\tt.tmr.Update(int64(time.Since(start) / time.Millisecond))\n\n\tif sw.Status() >= 300 {\n\t\tt.fCount.Mark(1)\n\t} else {\n\t\tt.sCount.Mark(1)\n\t}\n\n}", "func (s *SimpleHealthCheck) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif _, err := io.WriteString(w, \"ok-\"+Name); err != nil {\n\t\tLogWithFields(r).Warn(\"unable to write healthcheck response: \", err)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ServeHTTP handles requests for forcing a run by attempting to add to the runQueue, and writes a response including the result and a relevant message.
func (f *ForceRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Logger("webserver").Info("Force run requested") var data struct { Result string `json:"result"` Message string `json:"message"` } switch r.Method { case "POST": var ( userEmail string err error ) if f.Authenticator != nil { userEmail, err = f.Authenticator.UserEmail(r.Context(), r) if err != nil { data.Result = "error" data.Message = "not authenticated" log.Logger("webserver").Error(data.Message, "error", err) w.WriteHeader(http.StatusForbidden) break } } if err := r.ParseForm(); err != nil { data.Result = "error" data.Message = "could not parse form data" log.Logger("webserver").Error(data.Message, "error", err) w.WriteHeader(http.StatusBadRequest) break } ns := r.FormValue("namespace") if ns == "" { data.Result = "error" data.Message = "empty namespace value" log.Logger("webserver").Error(data.Message) w.WriteHeader(http.StatusBadRequest) break } waybills, err := f.KubeClient.ListWaybills(r.Context()) if err != nil { data.Result = "error" data.Message = "cannot list Waybills" log.Logger("webserver").Error(data.Message, "error", err) w.WriteHeader(http.StatusInternalServerError) break } var waybill *kubeapplierv1alpha1.Waybill for i := range waybills { if waybills[i].Namespace == ns { waybill = &waybills[i] break } } if waybill == nil { data.Result = "error" data.Message = fmt.Sprintf("cannot find Waybills in namespace '%s'", ns) w.WriteHeader(http.StatusBadRequest) break } if f.Authenticator != nil { // if the user can patch the Waybill, they are allowed to force a run hasAccess, err := f.KubeClient.HasAccess(r.Context(), waybill, userEmail, "patch") if !hasAccess { data.Result = "error" data.Message = fmt.Sprintf("user %s is not allowed to force a run on waybill %s/%s", userEmail, waybill.Namespace, waybill.Name) if err != nil { log.Logger("webserver").Error(data.Message, "error", err) } w.WriteHeader(http.StatusForbidden) break } } run.Enqueue(f.RunQueue, run.ForcedRun, waybill) data.Result = "success" data.Message = "Run queued" w.WriteHeader(http.StatusOK) default: data.Result = "error" data.Message = "must be a POST request" w.WriteHeader(http.StatusBadRequest) } w.Header().Set("Content-Type", "waybill/json; charset=UTF-8") json.NewEncoder(w).Encode(data) }
[ "func (f *ForceRunHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Force run requested\")\n\tvar data struct {\n\t\tResult string `json:\"result\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tselect {\n\t\tcase f.RunQueue <- true:\n\t\t\tlog.Logger.Info(\"Run queued\")\n\t\tdefault:\n\t\t\tlog.Logger.Info(\"Run queue is already full\")\n\t\t}\n\t\tdata.Result = \"success\"\n\t\tdata.Message = \"Run queued, will begin upon completion of current run.\"\n\t\tw.WriteHeader(http.StatusOK)\n\tdefault:\n\t\tdata.Result = \"error\"\n\t\tdata.Message = \"Error: force rejected, must be a POST request.\"\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tlog.Logger.Info(data.Message)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tjson.NewEncoder(w).Encode(data)\n}", "func (a *App) HandleRun(w http.ResponseWriter, r *http.Request) {\n\n\t// Get variables from the request\n\tvars := mux.Vars(r)\n\tvar variables RequestVariable\n\terr := variables.GetVariablesFromRequestVars(vars)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if the secret we passed in is valid, otherwise, return error 400\n\tif !a.Secret.Valid(variables.Secret) {\n\t\ta.DmnLogFile.Log.Println(\"Bad secret!\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tabortcmd := func(reason string) {\n\t\ta.DmnLogFile.Log.Println(reason)\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tvar sc ScheduledCommand\n\t\tsc.Status = Failed\n\t\tsc.Coutput = reason\n\t\tout, _ := json.Marshal(sc)\n\t\tio.WriteString(w, string(out))\n\t}\n\n\t// Select the dmn.Command, otherwise, if the dmn.Command hash cannot be found, return error 400\n\tselectedCmd, cerr := a.SelectCmd(variables.CmdHash)\n\n\tif cerr != nil {\n\t\tabortcmd(\"Unable to select hash: \" + variables.CmdHash)\n\t\treturn\n\t}\n\n\t// if selectedCmd.CmdHash == \"\" {\n\t// \tabortcmd(\"Invalid hash\")\n\t// \treturn\n\t// }\n\n\t_, err = os.Stat(selectedCmd.WorkingDirectory)\n\tif os.IsNotExist(err) {\n\t\tabortcmd(\"Invalid working directory: \" + selectedCmd.WorkingDirectory)\n\t\treturn\n\t}\n\n\ta.DmnLogFile.Log.Printf(\"Scheduling command %v: %v\\n\", selectedCmd.CmdHash, selectedCmd.Status)\n\tselectedCmd.Status = Scheduled\n\ta.CommandScheduler.QueuedCommands = append(a.CommandScheduler.QueuedCommands, selectedCmd)\n\ta.CommandScheduler.CommandQueue <- selectedCmd\n\n\ta.DmnLogFile.Log.Printf(\"Completed command %v: %v\\n\", selectedCmd.CmdHash, selectedCmd.Status)\n\n\tcompletedCommand := <-a.CommandScheduler.CompletedQueue\n\n\ta.DmnLogFile.Log.Printf(\"Command received from CompletedQueue: %v: %v\\n\", completedCommand.CmdHash, selectedCmd.Status)\n\n\ta.UpdateCommandDuration(selectedCmd, completedCommand.Duration)\n\n\tfor index, cmd := range a.CommandScheduler.QueuedCommands {\n\t\tif cmd.CmdHash == selectedCmd.CmdHash {\n\t\t\ta.DmnLogFile.Log.Printf(\"Updating status for %v: %v\\n\", cmd.CmdHash, Completed)\n\t\t\ta.CommandScheduler.QueuedCommands[index].Status = Completed\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.DmnLogFile.Log.Printf(\"Vacuuming command %v\\n\", selectedCmd.CmdHash)\n\ta.CommandScheduler.VacuumQueue <- selectedCmd\n\n\tout, _ := json.Marshal(completedCommand)\n\tio.WriteString(w, string(out))\n}", "func (h stubbingHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\th.mutex.Lock()\n\tdefer h.mutex.Unlock()\n\tresponses := 
h.holder.responses\n\tif len(responses) > 0 {\n\t\tresp := responses[0]\n\t\tw.WriteHeader(resp.responseCode)\n\t\t_, err := w.Write([]byte(resp.body))\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Can't write the response: %v\", err)\n\t\t}\n\n\t\tswitch resp.times {\n\t\tcase 0:\n\t\t\tbreak\n\t\tcase 1:\n\t\t\tshortened := responses[1:]\n\t\t\th.holder.responses = shortened\n\t\tdefault:\n\t\t\tresp.times--\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}", "func (m *MiddlewareChain) Run(w http.ResponseWriter, req *http.Request) {\n\tm.chain.ServeHTTP(w, req)\n}", "func (c *Sender) Do(r *http.Request) (*http.Response, error) {\n\tc.attempts++\n\n\tif !c.reuseResponse || c.resp == nil {\n\t\tresp := NewResponse()\n\t\tresp.Request = r\n\t\tresp.Body = NewBody(c.content)\n\t\tresp.Status = c.status\n\t\tresp.StatusCode = c.statusCode\n\t\tc.resp = resp\n\t} else {\n\t\tc.resp.Body.(*Body).reset()\n\t}\n\n\tif c.pollAttempts > 0 {\n\t\tc.pollAttempts--\n\t\tc.resp.Status = \"Accepted\"\n\t\tc.resp.StatusCode = http.StatusAccepted\n\t\tSetAcceptedHeaders(c.resp)\n\t}\n\n\tif c.emitErrors > 0 || c.emitErrors < 0 {\n\t\tc.emitErrors--\n\t\tif c.err == nil {\n\t\t\treturn c.resp, fmt.Errorf(\"Faux Error\")\n\t\t}\n\t\treturn c.resp, c.err\n\t}\n\treturn c.resp, nil\n}", "func (s *AppServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\t// hijack request id\n\trequestID := r.Header.Get(s.requestID)\n\tif requestID == \"\" || len(requestID) > DefaultMaxHttpRequestIDLen {\n\t\trequestID = NewGID().Hex()\n\n\t\t// inject request header with new request id\n\t\tr.Header.Set(s.requestID, requestID)\n\t}\n\n\tlogger := s.logger.New(requestID)\n\tdefer s.logger.Reuse(logger)\n\n\tlogger.Debugf(`processing %s \"%s\"`, r.Method, s.filterParameters(r.URL))\n\n\t// throughput by rate limit, timeout after time.Second/throttle\n\tif s.throttle != nil {\n\t\tctx, done := context.WithTimeout(context.Background(), s.throttleTimeout)\n\t\terr := s.throttle.Wait(ctx)\n\t\tdone()\n\n\t\tif err != nil {\n\t\t\tlogger.Warnf(\"Throughput exceed: %v\", err)\n\n\t\t\tw.Header().Set(\"Retry-After\", s.throttleTimeout.String())\n\t\t\thttp.Error(w, http.StatusText(http.StatusTeapot), http.StatusTeapot)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// concurrency by channel, timeout after request+response timeouts\n\tif s.slowdown != nil {\n\t\tticker := time.NewTicker(s.slowdownTimeout)\n\n\t\tselect {\n\t\tcase <-s.slowdown:\n\t\t\tticker.Stop()\n\n\t\t\tdefer func() {\n\t\t\t\ts.slowdown <- true\n\t\t\t}()\n\n\t\tcase <-ticker.C:\n\t\t\tticker.Stop()\n\n\t\t\tlogger.Warnf(\"Concurrency exceed: %v timeout\", s.slowdownTimeout)\n\n\t\t\tw.Header().Set(\"Retry-After\", s.slowdownTimeout.String())\n\t\t\thttp.Error(w, http.StatusText(http.StatusTooManyRequests), http.StatusTooManyRequests)\n\t\t\treturn\n\t\t}\n\t}\n\n\ts.AppRoute.ServeHTTP(w, r)\n}", "func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Logger.Info(\"Applier status request\", \"time\", s.Clock.Now().String())\n\tif s.Template == nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", \"No template found\", \"time\", s.Clock.Now().String())\n\t\treturn\n\t}\n\tif err := s.Template.Execute(w, s.Data); err != nil {\n\t\thttp.Error(w, \"Error: Unable to load HTML template\", http.StatusInternalServerError)\n\t\tlog.Logger.Error(\"Request failed\", \"error\", http.StatusInternalServerError, \"time\", 
s.Clock.Now().String())\n\t\treturn\n\t}\n\tlog.Logger.Info(\"Request completed successfully\", \"time\", s.Clock.Now().String())\n}", "func (s *Layer) Run(phase string, w http.ResponseWriter, r *http.Request, h http.Handler) {\n\tdefer func() {\n\t\tif phase == \"error\" {\n\t\t\treturn\n\t\t}\n\t\tif re := recover(); re != nil {\n\t\t\tcontext.Set(r, \"error\", re)\n\t\t\ts.Run(\"error\", w, r, FinalErrorHandler)\n\t\t}\n\t}()\n\n\tif h == nil {\n\t\th = s.finalHandler\n\t}\n\n\tstack := s.Pool[phase]\n\tif stack == nil {\n\t\tif phase != \"error\" {\n\t\t\th.ServeHTTP(w, r)\n\t\t}\n\t\treturn\n\t}\n\n\tqueue := stack.Join()\n\tfor i := len(queue) - 1; i >= 0; i-- {\n\t\th = queue[i](h)\n\t}\n\n\th.ServeHTTP(w, r)\n}", "func (s *GracefulHTTPServer) Run() error {\n\terrs := make(chan error, 1)\n\tstop := make(chan os.Signal, 1)\n\tsignal.Notify(stop, os.Interrupt)\n\n\tgo func() {\n\t\terr := s.svr.Serve(s.l)\n\t\tif err != nil {\n\t\t\terrs <- err\n\t\t}\n\t}()\n\n\t// This select statement will block until we can read from EITHER our errs\n\t// channel or the stop channel. The stop channel will get a value when we get\n\t// a SIGINT signal. The errs channel will get a value if we failed\n\t// to start the server.\n\tselect {\n\tcase err := <-errs:\n\t\ts.log.Error(\"\")\n\t\treturn err\n\tcase sig := <-stop:\n\t\ts.log.Info(\"server shutdown request received\", zap.String(\"signal\", sig.String()))\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), s.timeout)\n\terr := s.svr.Shutdown(ctx)\n\tcancel() // Cancel the timeout, since we already finished.\n\n\treturn err\n}", "func (mf MiddlewareFunc) Run(req *Request, handler Handler) (*Response, error) {\n\treturn mf(req, handler)\n}", "func run() {\n\thttp.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\terr := r.ParseForm()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tchallengeTokenString := r.Form.Get(\"challenge\")\n\t\tif challengeTokenString == \"\" {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tchallengeToken := getChallengeToken(challengeTokenString)\n\n\t\tconsentTokenString := generateConsentToken(challengeToken, \"joe@joe\", []string{\"read\", \"write\"})\n\n\t\tfmt.Printf(\"Access granted!\\n\")\n\n\t\t// TODO: Redirect only after checking user's credentials.\n\t\thttp.Redirect(w, r, challengeToken.Claims[\"redir\"].(string)+\"&consent=\"+consentTokenString, http.StatusFound)\n\t})\n\n\thttp.ListenAndServe(\":3000\", nil)\n}", "func (t *Timer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer t.UpdateSince(time.Now())\n\tt.handler.ServeHTTP(w, r)\n}", "func (f *HTTPForward) Run() error {\n\thandler, err := f.getRedirectHandler()\n\tif err != nil {\n\t\tlogrus.Errorf(\"get redirect handler failed: %s\\n\", err)\n\t\treturn err\n\t}\n\n\tidleConnsClosed := make(chan struct{})\n\tgo func() {\n\t\tsigint := make(chan os.Signal, 1)\n\t\tsignal.Notify(sigint, os.Interrupt)\n\t\t<-sigint\n\n\t\t// We received an interrupt signal, shut down.\n\t\tif err := f.srv.Shutdown(f.ctx); err != nil {\n\t\t\t// Error from closing listeners, or context timeout:\n\t\t\tlog.Printf(\"HTTP server Shutdown: %v\", err)\n\t\t}\n\t\tclose(idleConnsClosed)\n\t}()\n\n\tf.srv.Addr = f.listenTo\n\tf.srv.Handler = handler\n\tlogrus.Infof(\"starting server on %s\", f.srv.Addr)\n\tif err := f.srv.ListenAndServe(); err != http.ErrServerClosed {\n\t\t// Error starting or closing 
listener:\n\t\tlog.Printf(\"HTTP server ListenAndServe: %v\", err)\n\t}\n\n\t<-idleConnsClosed\n\treturn nil\n}", "func (prh PlanReplayerHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tparams := mux.Vars(req)\n\tname := params[handler.FileName]\n\thandler := downloadFileHandler{\n\t\tfilePath: filepath.Join(replayer.GetPlanReplayerDirName(), name),\n\t\tfileName: name,\n\t\tinfoGetter: prh.infoGetter,\n\t\taddress: prh.address,\n\t\tstatusPort: prh.statusPort,\n\t\turlPath: fmt.Sprintf(\"plan_replayer/dump/%s\", name),\n\t\tdownloadedFilename: \"plan_replayer\",\n\t\tscheme: util.InternalHTTPSchema(),\n\t\tstatsHandle: prh.statsHandle,\n\t\tis: prh.is,\n\t}\n\thandleDownloadFile(handler, w, req)\n}", "func Process(c http.ResponseWriter, req *http.Request) {\n\tmainServer.Process(c, req)\n}", "func (s *Server) HandleHTTP(w http.ResponseWriter, r *http.Request) {\n\tctx := s.Log.WithFields(log.Fields{\n\t\t\"path\": r.URL.Path,\n\t\t\"method\": r.Method,\n\t\t\"handler\": \"HandleHTTP\",\n\t})\n\n\tctx.Info(\"request\")\n\n\tif r.URL.Path == \"/\" {\n\n\t\tif r.Method == \"GET\" {\n\n\t\t\t// serve the index file here..\n\t\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\t\ts.IndexTemplate.Execute(w, r.Host)\n\t\t\treturn\n\n\t\t} else if r.Method == \"POST\" {\n\n\t\t\tpayload := map[string]string{\n\t\t\t\t\"id\": NewGameID(),\n\t\t\t}\n\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tif err := json.NewEncoder(w).Encode(payload); err != nil {\n\t\t\t\tctx.Error(err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\thttp.Error(w, \"not found\", http.StatusNotFound)\n\treturn\n}", "func (a *App) handleReq(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Invalid request method.\", 405)\n\t\treturn\n\t}\n\n\tduration, err := strconv.Atoi(r.URL.Query().Get(\"duration\"))\n\tif err != nil {\n\t\tduration = 0\n\t}\n\n\thttpcode, err := strconv.Atoi(r.URL.Query().Get(\"httpcode\"))\n\tif err != nil {\n\t\thttpcode = 200\n\t}\n\n\tworksecs, err := strconv.Atoi(r.URL.Query().Get(\"worksecs\"))\n\tif err != nil {\n\t\tworksecs = 0\n\t}\n\n\tworkfail := \"true\" == r.URL.Query().Get(\"workfail\")\n\n\tif worksecs > 0 && !a.enqueue(Job{worksecs, workfail}) {\n\t\tw.WriteHeader(507)\n\t\treturn\n\t}\n\n\ttime.Sleep(time.Duration(duration) * time.Millisecond)\n\tw.WriteHeader(httpcode)\n}", "func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif !h.tracer.Recording() || h.requestIgnorer(req) {\n\t\th.handler.ServeHTTP(w, req)\n\t\treturn\n\t}\n\ttx, body, req := StartTransactionWithBody(h.tracer, h.requestName(req), req)\n\tdefer tx.End()\n\n\tw, resp := WrapResponseWriter(w)\n\n\tdefer func() {\n\t\tif v := recover(); v != nil {\n\t\t\tif h.panicPropagation {\n\t\t\t\tdefer panic(v)\n\t\t\t\t// 500 status code will be set only for APM transaction\n\t\t\t\t// to allow other middleware to choose a different response code\n\t\t\t\tif resp.StatusCode == 0 {\n\t\t\t\t\tresp.StatusCode = http.StatusInternalServerError\n\t\t\t\t}\n\t\t\t} else if resp.StatusCode == 0 {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t}\n\t\t\th.recovery(w, req, resp, body, tx, v)\n\t\t}\n\t\tSetTransactionContext(tx, req, resp, body)\n\t\tbody.Discard()\n\t}()\n\th.handler.ServeHTTP(w, req)\n\tif resp.StatusCode == 0 {\n\t\tresp.StatusCode = http.StatusOK\n\t}\n}", "func handle(w http.ResponseWriter, req *http.Request) 
{\n\tid := fmt.Sprintf(\"[%3d]\", getReqNum())\n\tlog.Println(id, req.RemoteAddr, \"requests\", req.RequestURI)\n\n\tif f := hasCached(id, req.URL); f != nil {\n\t\td, err := tryServeCached(id, w, req, f)\n\t\tif err != nil {\n\t\t\tlog.Println(id, \"cache write:\", err)\n\t\t}\n\t\tif d {\n\t\t\treturn\n\t\t}\n\t}\n\n\tr, err := requestUpstream(req)\n\tif err != nil {\n\t\tlog.Println(id, err)\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tdefer r.Body.Close()\n\tch := w.(http.CloseNotifier).CloseNotify()\n\tgo func() {\n\t\t<-ch\n\t\tlog.Println(id, \"Client closed connection\")\n\t}()\n\trh := w.Header()\n\tfor k, v := range r.Header {\n\t\t//log.Println(id, \"Response header:\", k, v[0])\n\t\trh.Set(k, v[0])\n\t}\n\tlog.Println(id, \"Upstream replied\", r.StatusCode)\n\tw.WriteHeader(r.StatusCode)\n\n\tif r.StatusCode != 200 {\n\t\tif _, err := io.Copy(w, r.Body); err != nil {\n\t\t\tlog.Println(id, \"non-200 write:\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tvar o io.Writer\n\to = w\n\tp, s := shouldCache(req)\n\tif s {\n\t\tf, err := prepFile(req.URL)\n\t\tif err != nil {\n\t\t\tlog.Println(id, err)\n\t\t} else {\n\t\t\tdefer barrierSet(false, p)\n\t\t\tdefer f.Close()\n\t\t\tlog.Println(id, \"Saving\", f.Name())\n\t\t\to = io.MultiWriter(w, f)\n\t\t}\n\t}\n\tn, err := io.Copy(o, r.Body)\n\tif err != nil {\n\t\tlog.Println(id, \"upstream write:\", err)\n\t\tif s {\n\t\t\tdefer func() {\n\t\t\t\tif err := os.Remove(p); err != nil {\n\t\t\t\t\tlog.Println(id, err)\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\tcntDown.Add(n)\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shutdown gracefully shuts the webserver down.
func (ws *WebServer) Shutdown() error { err := ws.server.Shutdown(context.Background()) ws.server = nil return err }
[ "func (l *Launcher) Shutdown(ctxt context.Context) error {\n\t// send Browser.close() directly to devtools URL\n\tif l.devtoolsURL != \"\" {\n\t\tconn, err := client.Dial(l.devtoolsURL)\n\t\tif err == nil {\n\t\t\t_ = conn.Write(shutdownMsg)\n\t\t}\n\t}\n\n\t// osx applications do not automatically exit when all windows (ie, tabs)\n\t// closed, so send SIGTERM.\n\t//\n\t// TODO: add other behavior here for more process options on shutdown?\n\tif runtime.GOOS == \"darwin\" && l.cmd != nil && l.cmd.Process != nil {\n\t\treturn l.cmd.Process.Signal(syscall.SIGTERM)\n\t}\n\n\treturn nil\n}", "func (l BasicLogger) Shutdown(err error) {\n\tif err != nil {\n\t\tl.logger.Fatalf(\"server closed (err=%s)\", err)\n\t\treturn\n\t}\n\n\tl.logger.Println(\"server closed\", \"(ok)\")\n}", "func Shutdown() {\n\tdefaultDaemon.Shutdown()\n}", "func (s *Server) Shutdown() {\n\tclose(stop)\n}", "func (s *Server) ShutDown(ctx context.Context) error {\n\treturn s.HTTPServer.Shutdown(ctx)\n}", "func (w *Worker) Shutdown() {\n\tfmt.Println(\"SHUTDOWN\")\n\tatomic.StoreInt32(&w.active, 0)\n\tw.rpcListener.Close()\n}", "func (ui *GUI) Shutdown() {\n\tctx, cl := context.WithTimeout(ui.cfg.Ctx, time.Second*5)\n\tdefer cl()\n\tif err := ui.server.Shutdown(ctx); err != nil {\n\t\tlog.Error(err)\n\t}\n}", "func (h *HttpHelper) Shutdown() {\n\th.Server.Close()\n}", "func (s *RestServer) Shutdown() {\n\n\tlogger.Log.Warnln(\"[REST] Shutdown REST server...\")\n\tif s.HTTPServer != nil {\n\t\t// create a deadline to wait for.\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Config.ShutdownTimeout)*time.Second)\n\t\tdefer cancel()\n\t\t// does not block if no connections, otherwise wait until the timeout deadline\n\t\ts.HTTPServer.Shutdown(ctx)\n\t}\n}", "func (s *Rest) Shutdown() {\n\tlog.Print(\"[WARN] shutdown rest server\")\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\ts.lock.Lock()\n\tif err := s.httpServer.Shutdown(ctx); err != nil {\n\t\tlog.Printf(\"[DEBUG] rest shutdown error, %s\", err)\n\t}\n\tlog.Print(\"[DEBUG] shutdown rest server completed\")\n\ts.lock.Unlock()\n}", "func (t *TLSServer) Shutdown(ctx context.Context) error {\n\terrC := make(chan error, 2)\n\tgo func() {\n\t\terrC <- t.httpServer.Shutdown(ctx)\n\t}()\n\tgo func() {\n\t\tt.grpcServer.server.GracefulStop()\n\t\terrC <- nil\n\t}()\n\terrors := []error{}\n\tfor i := 0; i < 2; i++ {\n\t\terrors = append(errors, <-errC)\n\t}\n\treturn trace.NewAggregate(errors...)\n}", "func (p *Port) shutdown(ctx context.Context) {\n\t_ = p.httpServer().Shutdown(ctx)\n}", "func (c *Controller) Shutdown(w http.ResponseWriter, r *http.Request) {\n\tc.Logger.Debug(\"Received: \" + html.EscapeString(r.URL.Path))\n\n\t// Check server has been setup\n\tif c.server == nil {\n\t\tc.Logger.Fatal(\"Server has not been setup\")\n\t\treturn\n\t}\n\n\t// Close the unity server connection\n\tc.unityViewer.StopServer()\n\n\t// Stop the server from listening\n\tc.server.Shutdown(context.Background())\n\tc.Logger.Info(\"Server Shutdown\")\n}", "func (r *Receiver) Shutdown(ctx context.Context) error {\n\tr.stopOnce.Do(func() {\n\t\tr.scraper.stop()\n\t})\n\treturn nil\n}", "func (dd *DefaultDriver) Shutdown(ctx context.Context) error {\n\treturn dd.Server.Shutdown(ctx)\n}", "func ShutDown(srv *http.Server) {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\tsrv.Shutdown(ctx)\n}", "func (s *Rest) Shutdown() {\n\tlog.Print(\"[WARN] shutdown rest server\")\n\tctx, 
cancel := context.WithTimeout(context.Background(), time.Second)\n\n\tdefer cancel()\n\n\ts.lock.Lock()\n\n\tif s.httpServer != nil {\n\t\tif err := s.httpServer.Shutdown(ctx); err != nil {\n\t\t\tlog.Printf(\"[DEBUG] rest shutdown error, %s\", err)\n\t\t}\n\t}\n\n\tlog.Print(\"[DEBUG] shutdown rest server completed\")\n\n\ts.lock.Unlock()\n}", "func (s *Stats) Shutdown() {\n\tlog.Println(\"[Stats] -> Closing service\")\n\tclose(s.shutdown)\n}", "func ShutDown(ctx *fasthttp.RequestCtx, shutdown chan<- os.Signal) {\n\tctx.SetStatusCode(200)\n\tctx.WriteString(\"Server Shutdown\")\n\tclose(shutdown)\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithHTTPTimeout returns an HTTPCheckerOption that specifies the timeout for HTTP requests. Setting a timeout is highly recommended, but it needs to be carefully chosen to avoid false results.
func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption { return func(c *HTTPChecker) { c.timeout = timeout } }
[ "func WithHTTPTimeout(in time.Duration) Option {\n\treturn func(c *Client) {\n\t\tc.HTTPTimeout = in\n\t}\n}", "func WithHTTPTimeout(timeout time.Duration) ClientOps {\r\n\treturn func(c *clientOptions) {\r\n\t\tc.httpTimeout = timeout\r\n\t}\r\n}", "func HTTPTimeout(duration time.Duration) HTTPOption {\n\treturn func(c *HTTPCollector) { c.client.Timeout = duration }\n}", "func ConfigHTTPTimeout(t time.Duration) ConfigOption {\n\treturn func(cfg *config.Config) error {\n\t\tvar timeout = &t\n\t\tcfg.Timeout = timeout\n\t\treturn nil\n\t}\n}", "func WithTimeout(t time.Duration) ClientOption {\n\treturn func(c *Client) error {\n\t\tc.http.Timeout = t\n\t\treturn nil\n\t}\n}", "func OptTLSHandshakeTimeout(d time.Duration) Option {\n\treturn func(r *Request) error {\n\t\tif r.Client == nil {\n\t\t\tr.Client = &http.Client{}\n\t\t}\n\t\tif r.Client.Transport == nil {\n\t\t\tr.Client.Transport = &http.Transport{}\n\t\t}\n\t\tif typed, ok := r.Client.Transport.(*http.Transport); ok {\n\t\t\ttyped.TLSHandshakeTimeout = d\n\t\t}\n\t\treturn nil\n\t}\n}", "func WithHTTPServerTimeout(t time.Duration) Option {\n\treturn func(s *Server) {\n\t\ts.HTTPServerTimeout = t\n\t}\n}", "func OptTimeout(d time.Duration) Option {\n\treturn func(r *Request) error {\n\t\tif r.Client == nil {\n\t\t\tr.Client = &http.Client{}\n\t\t}\n\t\tr.Client.Timeout = d\n\t\treturn nil\n\t}\n}", "func WithTimeout(t time.Duration) OptFunc {\n\treturn func(d *Downloader) {\n\t\td.timeout = t\n\t}\n}", "func OptionHTTPTimeouts(read, write, idle time.Duration, disableKeepAlive bool) Option {\n\treturn func(cfg *gwconfig) {\n\t\tcfg.httpReadTimeout = read\n\t\tcfg.httpWriteTimeout = write\n\t\tcfg.httpIdleTimeout = idle\n\t\tcfg.httpDisableKeepAlive = disableKeepAlive\n\t}\n}", "func Timeout(timeout time.Duration) Option {\n\treturn func(client *http.Client) {\n\t\tclient.Timeout = timeout\n\t}\n}", "func TimeoutOption(d time.Duration) Option {\n\treturn func(w *Webman) {\n\t\tw.timeout = d\n\t}\n}", "func Timeout(d time.Duration) ConfigOpt {\n\treturn func(c *Config) {\n\t\tc.transport.ResponseHeaderTimeout = d\n\t\tc.transport.TLSHandshakeTimeout = d\n\t\tc.dialer.Timeout = d\n\t}\n}", "func (o *Options) HTTPRequestTimeout() uint {\n\treturn o.httpRequestTimeout\n}", "func WithTimeout(timeout time.Duration) DownloaderOption {\n\treturn func(d *HTTPDownloader) {\n\t\td.downloadTimeout = timeout\n\t}\n}", "func WithTimeout(timeout time.Duration) ClientOption {\n\treturn withTimeout{timeout}\n}", "func (d *Dnsfilter) SetHTTPTimeout(t time.Duration) {\n\td.client.Timeout = t\n}", "func (manager Manager) HTTPTimeout() time.Duration {\n\treturn manager.viperConfig.GetDuration(httpTimeout)\n}", "func NewHTTPClientWithTimeout(t time.Duration) *http.Client {\n\ttr := &http.Transport{\n\t\t// Added IdleConnTimeout to reduce the time of idle connections which\n\t\t// could potentially slow macOS reconnection when there is a sudden\n\t\t// network disconnection/issue\n\t\tIdleConnTimeout: t,\n\t\tProxy: http.ProxyFromEnvironment,\n\t}\n\th := &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: t}\n\treturn h\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WithHTTPMethod returns an HTTPCheckerOption that specifies the method for HTTP requests. The default method is "GET", which works in most cases, but another popular choice is "HEAD".
func WithHTTPMethod(method string) HTTPCheckerOption { return func(c *HTTPChecker) { c.method = method } }
[ "func WithMethod(method string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http method option can not set nil protocol\")\n\t\t}\n\t\tmethod = strings.TrimSpace(method)\n\t\tif method != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{}\n\t\t\t}\n\t\t\tp.RequestTemplate.Method = method\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http method option was empty string\")\n\t}\n}", "func WithMethod(method string) Option {\n\treturn func(r *RequestClient) {\n\t\tr.method = method\n\t}\n}", "func SetHTTPMethod(method string) auth.Option {\n\treturn internal.SetRequesterMethod(method)\n}", "func OptMethod(method string) Option {\n\treturn RequestOption(webutil.OptMethod(method))\n}", "func OptMethod(method string) Option {\n\treturn func(r *Request) error {\n\t\tr.Method = method\n\t\treturn nil\n\t}\n}", "func OptGet() Option {\n\treturn func(r *Request) error {\n\t\tr.Method = \"GET\"\n\t\treturn nil\n\t}\n}", "func (aauo *APIAuditUpdateOne) SetHTTPMethod(s string) *APIAuditUpdateOne {\n\taauo.mutation.SetHTTPMethod(s)\n\treturn aauo\n}", "func (c *RESTClient) HTTPMethod(method string, url string, args ...interface{}) (*Response, error) {\n\tvar err error\n\tvar response Response\n\tr := &response\n\n\tswitch method {\n\tcase \"GET\":\n\t\tr, err = c.get(string(url))\n\tcase \"DELETE\":\n\t\tr, err = c.delete(string(url))\n\tcase \"POST\":\n\t\tr, err = c.post(string(url), args[0].(map[string]interface{}))\n\tcase \"PUT\":\n\t\tr, err = c.put(string(url), args[0].(map[string]interface{}))\n\tcase \"\":\n\t\treturn nil, errors.New(\"Unsupported method.\")\n\t}\n\n\treturn r, err\n}", "func (aau *APIAuditUpdate) SetHTTPMethod(s string) *APIAuditUpdate {\n\taau.mutation.SetHTTPMethod(s)\n\treturn aau\n}", "func HasHTTPRuleOptions(method *descriptor.MethodDescriptorProto) bool {\n\toptions := method.GetOptions()\n\tif options == nil {\n\t\treturn false\n\t}\n\treturn proto.HasExtension(options, annotations.E_Http)\n}", "func (i Internet) HTTPMethod() string {\n\treturn i.Faker.RandomStringElement([]string{\n\t\thttp.MethodGet,\n\t\thttp.MethodHead,\n\t\thttp.MethodPost,\n\t\thttp.MethodPut,\n\t\thttp.MethodPatch,\n\t\thttp.MethodDelete,\n\t\thttp.MethodConnect,\n\t\thttp.MethodOptions,\n\t\thttp.MethodTrace,\n\t})\n}", "func (ep *Endpoint) SetMethod(method string) *Endpoint {\n\tif stringInSlice(method, HttpMethods) {\n\t\tep.Method = method\n\t} else {\n\t\tlog.Fatalf(\"method %q is not supported\", method)\n\t}\n\treturn ep\n}", "func (server *HTTPServer) handleOptionsMethod(nextHandler http.Handler) http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Method == \"OPTIONS\" {\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t} else {\n\t\t\t\tnextHandler.ServeHTTP(w, r)\n\t\t\t}\n\t\t})\n}", "func (o ServerGroupHealthCheckOutput) HttpCheckMethod() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServerGroupHealthCheck) *string { return v.HttpCheckMethod }).(pulumi.StringPtrOutput)\n}", "func (r *Request) HTTPMethod() string {\n\treturn r.httpMethod\n}", "func (f *APIAuditFilter) WhereHTTPMethod(p entql.StringP) {\n\tf.Where(p.Field(apiaudit.FieldHTTPMethod))\n}", "func (r *irequest) SetMethod(method string, options ...interface{}) Request {\n\tr.Method = strings.ToUpper(method)\n\n\tif len(options) > 0 {\n\t\tif v, ok := options[0].(string); ok {\n\t\t\tr.SetUrl(v)\n\t\t}\n\t}\n\n\tif len(options) > 1 {\n\t\tr.SetBody(options[1])\n\t}\n\n\tif 
len(options) > 2 {\n\t\tif v, ok := options[2].(map[string]string); ok {\n\t\t\tr.SetHeaders(v)\n\t\t}\n\t}\n\n\tif len(options) > 3 {\n\t\tif v, ok := options[3].(map[string]string); ok {\n\t\t\tr.SetQueries(v)\n\t\t}\n\t}\n\n\treturn r\n}", "func (c *Client) Options(url string, headers, queryParams map[string][]string) (response *http.Response, err error) {\n\treturn c.makeRequest(url, http.MethodOptions, headers, queryParams, nil)\n}", "func httpMethodBuilder(m string, ac AccessControl, handler http.Handler, router *httprouter.Router, status string, url string, proxyConfig *Proxy) {\n\tlogger.Debugf(\"[DEBUG] LINK:\", m, url)\n\tswitch m {\n\tcase \"GET\":\n\t\trouter.GET(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"POST\":\n\t\trouter.POST(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"PUT\":\n\t\trouter.PUT(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"DELETE\":\n\t\trouter.DELETE(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\tcase \"HEAD\":\n\t\trouter.HEAD(ac.Route, easyJWT(handler, ac, proxyConfig.Connect.HeaderPrefix))\n\t}\n\t// always OPTIONS\n\tif h, _, _ := router.Lookup(\"OPTIONS\", ac.Route); h == nil {\n\t\tlogger.Debugf(\"[DEBUG] LINK: OPTIONS\", url)\n\t\trouter.OPTIONS(ac.Route, func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\t\tlogger.Debugf(\"[DEBUG] set cors\", r.URL)\n\t\t\tw, r = addCORSHeaders(w, r)\n\t\t\tw.Write([]byte(\"\"))\n\t\t\treturn\n\t\t})\n\t}\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewHTTPChecker creates a new HTTPChecker with a URL and optional configuration. Example: checker := healthz.NewHTTPChecker("…", healthz.WithHTTPTimeout(3*time.Second))
func NewHTTPChecker(url string, opts ...HTTPCheckerOption) *HTTPChecker { checker := &HTTPChecker{ url: url, method: http.MethodGet, } for _, opt := range opts { opt(checker) } return checker }
[ "func NewHTTPCheck(name, endpoint string) (Check, error) {\n\tep, err := url.Parse(endpoint)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thc := &httpCheck{\n\t\tcheck: newCheck(name, ep.Hostname(), CheckTypeHTTP),\n\t\tURL: ep.Path,\n\t}\n\n\tif ep.Scheme == \"https\" {\n\t\thc.Encryption = true\n\t}\n\n\tif ep.User != nil {\n\t\tif ep.User.Username() != \"\" {\n\t\t\thc.Auth = ep.User.Username()\n\t\t}\n\n\t\tif pass, ok := ep.User.Password(); ok {\n\t\t\thc.Auth = hc.Auth + \":\" + pass\n\t\t}\n\t}\n\n\tif ep.Port() != \"\" {\n\t\thc.Port, err = strconv.Atoi(ep.Port())\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if ep.Scheme == \"https\" {\n\t\thc.Port = 443\n\t}\n\n\treturn hc, nil\n}", "func WithHTTPTimeout(timeout time.Duration) HTTPCheckerOption {\n\treturn func(c *HTTPChecker) {\n\t\tc.timeout = timeout\n\t}\n}", "func NewHTTP(\n\turlValue string,\n\thttpHost string,\n\tpersistentAddresses []string,\n\tpersistentConnection bool,\n\texpectedStatusCode int,\n\tlabels map[string]string,\n\tannotations types.MetricAnnotations,\n) *HTTPCheck {\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: true, //nolint:gosec\n\t}\n\n\tmainTCPAddress := \"\"\n\n\tif u, err := url.Parse(urlValue); err != nil {\n\t\tport := u.Port()\n\t\tif port == \"\" && u.Scheme == \"http\" {\n\t\t\tport = \"80\"\n\t\t} else if port == \"\" && u.Scheme == \"https\" {\n\t\t\tport = \"443\"\n\t\t}\n\n\t\tmainTCPAddress = fmt.Sprintf(\"%s:%s\", u.Hostname(), port)\n\t}\n\n\thc := &HTTPCheck{\n\t\turl: urlValue,\n\t\thttpHost: httpHost,\n\t\texpectedStatusCode: expectedStatusCode,\n\t\tclient: &http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t},\n\t\t\tTransport: types.NewHTTPTransport(tlsConfig),\n\t\t},\n\t}\n\n\thc.baseCheck = newBase(mainTCPAddress, persistentAddresses, persistentConnection, hc.httpMainCheck, labels, annotations)\n\n\treturn hc\n}", "func NewHTTPCheckLister(indexer cache.Indexer) HTTPCheckLister {\n\treturn &hTTPCheckLister{indexer: indexer}\n}", "func NewURLHealthChecker(parameters map[string]interface{}) (*URLHealthChecker, error) {\n\turl, ok1 := parameters[\"url\"]\n\ttimeout, ok2 := parameters[\"timeout\"]\n\n\tif !(ok1 && ok2) {\n\t\treturn nil, errors.New(\"url and timeout fields are required\")\n\t}\n\n\treturn &URLHealthChecker{\n\t\turl: url.(string),\n\t\ttimeout: toFloat64(timeout),\n\t}, nil\n}", "func NewHealthChecker(k8sClient kubernetes.Interface, metrics instrumentation.Metrics, baseURL string) HealthChecker {\n\n\treturn HealthChecker{client: client, k8sClient: k8sClient, metrics: metrics, baseURL: baseURL}\n}", "func NewTCPChecker(name string, endpoint string, timeout time.Duration) ServiceChecker {\n\treturn &TCPServiceConfig{\n\t\tname: name,\n\t\tendpoint: endpoint,\n\t\ttimeout: timeout,\n\t}\n}", "func (c *HTTPChecker) Check() error {\n\tclient := &http.Client{\n\t\tTimeout: c.timeout,\n\t}\n\n\treq, err := http.NewRequest(c.method, c.url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn ErrCheckFailed\n\t}\n\n\treturn nil\n}", "func NewHTTPGetChecker(client *http.Client, expectedStatus int) *HTTPGetChecker {\n\treturn &HTTPGetChecker{\n\t\tclient: client,\n\t\texpectedStatus: expectedStatus,\n\t}\n}", "func HTTPGetCheck(url string, timeout time.Duration) Check {\n\tclient := http.Client{\n\t\tTimeout: timeout,\n\t\t// never follow 
redirects\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\treturn func() error {\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"returned status %d\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n}", "func NewHTTPOptions(URL string) *HTTPOptions {\n\to := HTTPOptions{\n\t\tURL: URL,\n\t\tTickerInterval: 20,\n\t\tTimeout: 60,\n\t\tExpectedStatus: http.StatusOK,\n\t\tHeaders: make(map[string]string),\n\t}\n\treturn &o\n}", "func NewHealthcheck(m seesaw.HealthcheckMode, t seesaw.HealthcheckType, port uint16) *Healthcheck {\n\treturn &Healthcheck{\n\t\tMode: m,\n\t\tType: t,\n\t\tPort: port,\n\t}\n}", "func NewHealthChecker(cloud HealthCheckProvider, healthCheckPath string, defaultBackendSvc types.NamespacedName, recorderGetter RecorderGetter, serviceGetter ServiceGetter, enableTHC bool) *HealthChecks {\n\tci := generateClusterInfo(cloud.(*gce.Cloud))\n\treturn &HealthChecks{cloud, healthCheckPath, defaultBackendSvc, recorderGetter, serviceGetter, ci, enableTHC}\n}", "func NewCheck(vInt, vTmout, vMin int, addr string) HealthCheck {\n\tif vInt < 1 {\n\t\tvInt = 5\n\t}\n\n\tif vTmout < 1 {\n\t\tvTmout = 5\n\t}\n\n\tif vMin < 1 {\n\t\tvMin = 3\n\t}\n\n\tc := HealthCheck{\n\t\tInterval: vInt,\n\t\tTimeout: vTmout,\n\t\tMinimum: vMin,\n\t\tAddress: addr,\n\t}\n\n\treturn c\n}", "func NewHealthchecker(log *logrus.Logger, hostname string) Healthchecker {\n\treturn &healthchecker{\n\t\tlog: log.WithField(\"service\", \"lookup\"),\n\t\thostname: hostname,\n\t}\n}", "func New() *Checker {\n\treturn &Checker{\n\t\tHostname: Hostname,\n\t\tMaxTimeInFailure: maxTimeInFailure,\n\t}\n}", "func NewHealthCheck(opt ...Option) *HealthCheck {\n\topts := GetOpts(opt...)\n\n\th := &HealthCheck{\n\t\tstatus: &healthStatus{},\n\t}\n\tif e, ok := opts[optionWithEngine].(*gin.Engine); ok {\n\t\th.Engine = e\n\t}\n\tif path, ok := opts[optionWithHealthPath].(string); ok {\n\t\th.HealthPath = path\n\t} else {\n\t\th.HealthPath = \"/ready\"\n\t}\n\tif handler, ok := opts[optionWithHealthHandler].(gin.HandlerFunc); ok {\n\t\th.Handler = handler\n\t} else {\n\t\th.Handler = h.DefaultHealthHandler()\n\t}\n\n\tif ticker, ok := opts[optionHealthTicker].(*time.Ticker); ok {\n\t\th.metricTicker = ticker\n\t} else {\n\t\th.metricTicker = time.NewTicker(DefaultHealthTickerDuration)\n\t}\n\n\treturn h\n}", "func (s *hTTPCheckLister) HTTPChecks(namespace string) HTTPCheckNamespaceLister {\n\treturn hTTPCheckNamespaceLister{indexer: s.indexer, namespace: namespace}\n}", "func NewHealthcheck(f func(Healthcheck)) Healthcheck {\n\tif !Enabled {\n\t\treturn NilHealthcheck{}\n\t}\n\treturn &StandardHealthcheck{nil, f}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check implements the Checker interface and checks the HTTP endpoint status.
func (c *HTTPChecker) Check() error { client := &http.Client{ Timeout: c.timeout, } req, err := http.NewRequest(c.method, c.url, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } if resp.StatusCode != http.StatusOK { return ErrCheckFailed } return nil }
[ "func Check(url string, client HTTPGetInterface) (Status, error) {\n\tres, err := client.Get(url)\n\tif err != nil {\n\t\treturn Unknown, err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest {\n\t\treturn Healthy, nil\n\t}\n\tglog.V(1).Infof(\"Health check failed for %s, Response: %v\", url, *res)\n\treturn Unhealthy, nil\n}", "func (b *backend) healthCheck() {\n\t// If no checkurl har been set, assume we are healthy\n\tif b.HealthURL == \"\" {\n\t\tb.Stats.Healthy = true\n\t\treturn\n\t}\n\n\treq, err := http.NewRequest(\"GET\", b.HealthURL, nil)\n\tif err != nil {\n\t\tlog.Println(\"Error checking health of\", b.HealthURL, \"Error:\", err)\n\t}\n\n\treq.Header.Set(\"User-Agent\", \"doproxy health checker\")\n\n\tb.Stats.mu.Unlock()\n\t// Perform the check\n\tresp, err := b.healthClient.Do(req)\n\n\tb.Stats.mu.Lock()\n\t// Check response\n\tif err != nil {\n\t\tb.Stats.healthFailures++\n\t\tlog.Println(\"Error checking health of\", b.HealthURL, \"Error:\", err)\n\t\treturn\n\t}\n\tif resp.StatusCode >= 500 {\n\t\tb.Stats.healthFailures++\n\t\tlog.Println(\"Error checking health of\", b.HealthURL, \"Status code:\", resp.StatusCode)\n\t} else {\n\t\t// Reset failures\n\t\tb.Stats.healthFailures = 0\n\t}\n\tresp.Body.Close()\n}", "func (e *Endpoint) Check(ctx echo.Context) error {\n\thealthData := e.service.HealthCheck()\n\n\tif !healthData.Database {\n\t\treturn ctx.JSON(http.StatusServiceUnavailable, healthData)\n\t}\n\treturn ctx.JSON(http.StatusOK, healthData)\n}", "func (h *HealthImpl) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\treturn &grpc_health_v1.HealthCheckResponse{\n\t\tStatus: grpc_health_v1.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (s *Server) healthCheck(w http.ResponseWriter, r *http.Request) {\n\t// TODO: Maybe in the future check that ES is reachable?\n\tfmt.Fprintf(w, \"OK\")\n}", "func (s *server) Check(_ context.Context, request *healthgrpc.HealthCheckRequest) (*healthgrpc.HealthCheckResponse, error) {\n\tif request.Service == \"\" {\n\t\treturn &healthgrpc.HealthCheckResponse{\n\t\t\tStatus: healthgrpc.HealthCheckResponse_SERVING,\n\t\t}, nil\n\t}\n\n\tif checker, ok := s.serviceMap[request.Service]; ok {\n\t\treturn &healthgrpc.HealthCheckResponse{\n\t\t\tStatus: checker.Check(),\n\t\t}, nil\n\t}\n\n\treturn nil, status.Errorf(codes.Unknown, \"Unknown service: %v\", request.Service)\n}", "func Check() (status, message string, err error) {\n\tif serviceStatus.overall {\n\t\treturn StatusHealthy, MessageHealthy, nil\n\t}\n\n\treturn StatusNotHealthy, MessageNotHealthy, nil\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Status OK.\\n\")\n}", "func (t *TCPChecker) Check(extConfig external.Check) *pkg.CheckResult {\n\tc := extConfig.(v1.TCPCheck)\n\taddr, port, err := extractAddrAndPort(c.Endpoint)\n\tif err != nil {\n\t\treturn Failf(c, err.Error())\n\t}\n\n\ttimeout := time.Millisecond * time.Duration(c.ThresholdMillis)\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(addr, port), timeout)\n\tif err != nil {\n\t\treturn Failf(c, \"Connection error: %s\", err.Error())\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t}\n\treturn Passf(c, \"Successfully opened: %s\", net.JoinHostPort(addr, port))\n}", "func (s Server) Check(ctx context.Context, request *healthservice.HealthCheckRequest) (*healthservice.HealthCheckResponse, error) {\n\tlogger.LoggerHealth.Debugf(\"Querying health 
state for Adapter service \\\"%s\\\"\", request.Service)\n\tlogger.LoggerHealth.Debugf(\"Internal health state map: %v\", serviceHealthStatus)\n\n\tif request.Service == \"\" {\n\t\t// overall health of the server\n\t\tisHealthy := true\n\t\tfor _, ok := range serviceHealthStatus {\n\t\t\tisHealthy = isHealthy && ok\n\t\t}\n\n\t\tif isHealthy {\n\t\t\tlogger.LoggerHealth.Debug(\"Responding health state of Adapter as HEALTHY\")\n\t\t\treturn &healthservice.HealthCheckResponse{Status: healthservice.HealthCheckResponse_SERVING}, nil\n\t\t}\n\t\tlogger.LoggerHealth.Debug(\"Responding health state of Adapter as NOT_HEALTHY\")\n\t\treturn &healthservice.HealthCheckResponse{Status: healthservice.HealthCheckResponse_NOT_SERVING}, nil\n\t}\n\n\t// health of the component of a server\n\tif isHealthy, ok := serviceHealthStatus[request.Service]; ok {\n\t\tif isHealthy {\n\t\t\tlogger.LoggerHealth.Debugf(\"Responding health state of Adapter service \\\"%s\\\" as HEALTHY\", request.Service)\n\t\t\treturn &healthservice.HealthCheckResponse{Status: healthservice.HealthCheckResponse_SERVING}, nil\n\t\t}\n\t\tlogger.LoggerHealth.Debugf(\"Responding health state of Adapter service \\\"%s\\\" as NOT_HEALTHY\", request.Service)\n\t\treturn &healthservice.HealthCheckResponse{Status: healthservice.HealthCheckResponse_NOT_SERVING}, nil\n\t}\n\n\t// no component found\n\tlogger.LoggerHealth.Debugf(\"Responding health state of Adapter service \\\"%s\\\" as UNKNOWN\", request.Service)\n\treturn &healthservice.HealthCheckResponse{Status: healthservice.HealthCheckResponse_UNKNOWN}, nil\n}", "func (c *DogHouseClient) Check(ctx context.Context, req *doghouse.CheckRequest) (*doghouse.CheckResponse, error) {\n\tcheckURL := c.BaseURL.String() + \"/check\"\n\tb, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq, err := http.NewRequest(http.MethodPost, checkURL, bytes.NewReader(b))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thttpReq = httpReq.WithContext(ctx)\n\thttpReq.Header.Set(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Set(\"User-Agent\", fmt.Sprintf(\"reviewdog/%s\", commands.Version))\n\n\thttpResp, err := c.Client.Do(httpReq)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Check request failed: %w\", err)\n\t}\n\tdefer httpResp.Body.Close()\n\n\trespb, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif httpResp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"status=%v: %s\", httpResp.StatusCode, respb)\n\t}\n\n\tvar resp doghouse.CheckResponse\n\tif err := json.Unmarshal(respb, &resp); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode response: error=%w, resp=%s\", err, respb)\n\t}\n\treturn &resp, nil\n}", "func (t *TCPChecker) Check(ctx *context.Context, extConfig external.Check) pkg.Results {\n\tc := extConfig.(v1.TCPCheck)\n\tresult := pkg.Success(c, ctx.Canary)\n\tvar results pkg.Results\n\tresults = append(results, result)\n\n\tif connection, err := duty.FindConnectionByURL(ctx, db.Gorm, c.Endpoint); err != nil {\n\t\treturn results.Failf(\"failed to find TCP endpoint from connection %q: %v\", c.Endpoint, err)\n\t} else if connection != nil {\n\t\tc.Endpoint = connection.URL\n\t}\n\n\taddr, port, err := extractAddrAndPort(c.Endpoint)\n\tif err != nil {\n\t\treturn results.ErrorMessage(err)\n\t}\n\n\ttimeout := time.Millisecond * time.Duration(c.ThresholdMillis)\n\tconn, err := net.DialTimeout(\"tcp\", net.JoinHostPort(addr, port), timeout)\n\tif err != nil {\n\t\treturn results.Failf(\"Connection 
error: %s\", err)\n\t}\n\tif conn != nil {\n\t\tdefer conn.Close()\n\t}\n\treturn results\n}", "func (s *healthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {\n\tlog.Printf(\"Handling grpc Check request\")\n\treturn &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil\n}", "func (s *WorkersService) Check() (CheckResult, error) {\n\tclient := &http.Client{\n\t\tTimeout: clientTimeout,\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, s.url, nil)\n\tq := req.URL.Query()\n\tq.Add(OSKeyName, runtime.GOOS)\n\tq.Add(ArchitectureKeyName, runtime.GOARCH)\n\tq.Add(ClientVersionName, s.currentVersion)\n\n\tif s.opts.IsBeta {\n\t\tq.Add(BetaKeyName, \"true\")\n\t}\n\n\tif s.opts.RequestedVersion != \"\" {\n\t\tq.Add(VersionKeyName, s.opts.RequestedVersion)\n\t}\n\n\treq.URL.RawQuery = q.Encode()\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar v VersionResponse\n\tif err := json.NewDecoder(resp.Body).Decode(&v); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif v.Error != \"\" {\n\t\treturn nil, errors.New(v.Error)\n\t}\n\n\tversionToUpdate := \"\"\n\tif v.ShouldUpdate {\n\t\tversionToUpdate = v.Version\n\t}\n\n\treturn NewWorkersVersion(v.URL, versionToUpdate, v.Checksum, s.targetPath, v.UserMessage, v.IsCompressed), nil\n}", "func (*server) Check() healthgrpc.HealthCheckResponse_ServingStatus {\n\treturn healthgrpc.HealthCheckResponse_SERVING\n}", "func (sc ServiceCheck) Check() error {\n\tif len(sc.Name) == 0 {\n\t\treturn fmt.Errorf(\"statsd.ServiceCheck name is required\")\n\t}\n\tif byte(sc.Status) < 0 || byte(sc.Status) > 3 {\n\t\treturn fmt.Errorf(\"statsd.ServiceCheck status has invalid value\")\n\t}\n\treturn nil\n}", "func healthcheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (fwdclient *Client) HealthCheck() error {\n\tlog.Debugf(\"%s: url=%s\", fwdclient.AppName, fwdclient.ActionUrls.Health)\n\treq, err := http.NewRequest(\"GET\", fwdclient.ActionUrls.Health, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Splunk %s\", fwdclient.Token))\n\tresp, err := fwdclient.httpclient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\" Please check splunk authorization token. 
%s: Health check failed: %s\", fwdclient.AppName, err)\n\t}\n\tdefer resp.Body.Close()\n\tlog.Debugf(\"%s: status=%d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"%s: Failed during Health check : %d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\t}\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: Failed while reading health response body: %s\", fwdclient.AppName, err)\n\t}\n\thealthCheckResponse := new(HealthCheckResponse)\n\tif err := json.Unmarshal(respBody, healthCheckResponse); err != nil {\n\t\treturn fmt.Errorf(\"%s: health check failed: the response is not JSON but: %s\", fwdclient.AppName, respBody)\n\t}\n\tlog.Debugf(\"%s: code=%d, text=%s\", fwdclient.AppName, healthCheckResponse.Code, healthCheckResponse.Text)\n\treturn nil\n}", "func (e AuthService) Check() (bool, error) {\n\turl := \"/authentication\"\n\n\tresp, err := e.client.MakeRequest(\n\t\t\"GET\",\n\t\turl,\n\t\t0,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase http.StatusOK:\n\t\treturn true, nil\n\tcase http.StatusUnauthorized:\n\t\tfallthrough\n\tcase http.StatusForbidden:\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, e.client.handleUnexpectedResponse(resp)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a string, this tests variants of buffer conversion: string with trailing 0s, string exactly filling the slice passed to CFieldString (simulating an exactly-full field), string plus one trailing null, and first character (exactly filling a one-byte field).
func teststring(t *testing.T, s string) { buf := toint8(s) r := kstat.CFieldString(buf[:]) if r != s { t.Fatalf("full buf mismatch: %q vs %q", s, r) } r = kstat.CFieldString(buf[:len(s)]) if r != s { t.Fatalf("exact buf mismatch: %q vs %q", s, r) } r = kstat.CFieldString(buf[:len(s)+1]) if r != s { t.Fatalf("string + one null mismatch: %q vs %q", s, r) } if len(s) > 1 { r = kstat.CFieldString(buf[:1]) if r != s[:1] { t.Fatalf("first character mismatch: %q vs %q", s[:1], r) } } }
[ "func cString(b []byte) string {\n\tn := 0\n\tfor n < len(b) && b[n] != 0 {\n\t\tn++\n\t}\n\treturn string(b[0:n])\n}", "func cstring(b []byte) string {\n\tvar i int\n\tfor i = 0; i < len(b) && b[i] != 0; i++ {\n\t}\n\treturn string(b[:i])\n}", "func ReadString(b []byte) (string, error) {\n\tfor i, c := range b {\n\t\tif c == 0 {\n\t\t\treturn string(b[:i]), nil\n\t\t}\n\t}\n\n\treturn \"\", wlerr.Errorf(\"failed to parse string: no null terminator\")\n}", "func (suite *RunePartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart := runePart{runeVal: 'a'}\n\tbuff := make([]byte, 0, 0)\n\tcount, err := part.Read(buff)\n\tsuite.Nil(err)\n\tsuite.Equal(0, count)\n\tsuite.Equal(\"\", string(buff))\n}", "func (mp *MysqlProtocolImpl) readStringNUL(data []byte, pos int) (string, int, bool) {\n\tzeroPos := bytes.IndexByte(data[pos:], 0)\n\tif zeroPos == -1 {\n\t\treturn \"\", 0, false\n\t}\n\treturn string(data[pos : pos+zeroPos]), pos + zeroPos + 1, true\n}", "func TestStringZeroForNotEmptyString(t *testing.T) {\n\n\t// Arrange.\n\n\ts := \"Hello utilities\"\n\n\t// Act.\n\n\tresult := IsZero(s)\n\n\t// Assert.\n\n\tassert.False(t, result)\n}", "func parseString(buf []byte) string {\n\tpos := bytes.IndexByte(buf, '\\x00')\n\tif pos == -1 {\n\t\tpanic(fmt.Errorf(\"unable to locate NULL-terminated string in % 02X\", buf))\n\t}\n\treturn string(buf[:pos])\n}", "func fillString(t *testing.T, testname string, b *Builder, s string, n int, fus string) string {\n\tcheckRead(t, testname+\" (fill 1)\", b, s)\n\tfor ; n > 0; n-- {\n\t\tm, err := b.WriteString(fus)\n\t\tif m != len(fus) {\n\t\t\tt.Errorf(testname+\" (fill 2): m == %d, expected %d\", m, len(fus))\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(testname+\" (fill 3): err should always be nil, found err == %s\", err)\n\t\t}\n\t\ts += fus\n\t\tcheckRead(t, testname+\" (fill 4)\", b, s)\n\t}\n\treturn s\n}", "func testFCString(t testing.TB) {\n\tvar line = \"9900000100000007000000010000000000100000Contact Name 55586755520 \"\n\tr := NewReader(strings.NewReader(line))\n\tr.line = line\n\terr := r.parseFileControl()\n\tif err != nil {\n\t\tt.Errorf(\"%T: %s\", err, err)\n\t\tlog.Fatal(err)\n\t}\n\trecord := r.File.Control\n\tif record.String() != line {\n\t\tt.Errorf(\"\\nStrings do not match %s\\n %s\", line, record.String())\n\t}\n}", "func TestCompressStr(t *testing.T) {\n\tif CompressStr(\"aabcccccaaa\") != \"a2b1c5a3\" {\n\t\tt.Error()\n\t}\n\tif CompressStr(\"abcdefgh\") != \"abcdefgh\" {\n\t\tt.Error()\n\t}\n\tif CompressStr(\"\") != \"\" {\n\t\tt.Error()\n\t}\n}", "func fillString(retunString string, toLength int) string {\n\tfor {\n\t\tlengtString := len(retunString)\n\t\tif lengtString < toLength {\n\t\t\tretunString = retunString + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn retunString\n}", "func stringReset(tls *libc.TLS, p uintptr) { /* mptest.c:437:13: */\n\tif (*String)(unsafe.Pointer(p)).z == uintptr(0) {\n\t\tstringAppend(tls, p, ts+143 /* \" \" */, 1)\n\t}\n\t(*String)(unsafe.Pointer(p)).n = 0\n\t*(*int8)(unsafe.Pointer((*String)(unsafe.Pointer(p)).z)) = int8(0)\n}", "func FixStringLen(slice string, x int) string {\n\tif len(slice) < x {\n\t\tfor len(slice) < x {\n\t\t\t// add 0 to start of string\n\t\t\ts1 := strings.Repeat(\"0\", x-len(slice)) + slice\n\t\t\tout := s1\n\t\t\tdebug.Printf(\"FixStringLen if for: %v \", out)\n\t\t\treturn out\n\n\t\t}\n\t}\n\tout := slice\n\tdebug.Printf(\"FixStringLen: %v \", out)\n\treturn out\n\n}", "func bytesToString(b []byte) string {\n\tnulIndex := 0\n\tnulFound := false\n\tfor 
nulIndex = range b {\n\t\tif b[nulIndex] == 0 {\n\t\t\tnulFound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif nulFound {\n\t\treturn string(b[:nulIndex])\n\t}\n\treturn string(b)\n}", "func MustCString(s string) uintptr {\n\tn := len(s)\n\tp := MustMalloc(n + 1)\n\tcopy((*rawmem)(unsafe.Pointer(p))[:n], s)\n\t(*rawmem)(unsafe.Pointer(p))[n] = 0\n\treturn p\n}", "func fillString(message string, toLength int) string {\n\tfor {\n\t\tstringLength := len(message)\n\t\tif stringLength < toLength {\n\t\t\tmessage = message + \":\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn message\n}", "func safeString(str string) string {\n\tif len(str) > 0 && str[len(str)-1] != '\\x00' {\n\t\tstr = str + \"\\x00\"\n\t} else if len(str) == 0 {\n\t\tstr = \"\\x00\"\n\t}\n\treturn str\n}", "func check(t *testing.T, testname string, buf *Buffer, s string) {\n\tbytes := buf.Bytes()\n\tstr := buf.String()\n\tif buf.Len() != len(bytes) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(buf.Bytes()) == %d\", testname, buf.Len(), len(bytes))\n\t}\n\n\tif buf.Len() != len(str) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(buf.String()) == %d\", testname, buf.Len(), len(str))\n\t}\n\n\tif buf.Len() != len(s) {\n\t\tt.Errorf(\"%s: buf.Len() == %d, len(s) == %d\", testname, buf.Len(), len(s))\n\t}\n\n\tif string(bytes) != s {\n\t\tt.Errorf(\"%s: string(buf.Bytes()) == %q, s == %q\", testname, string(bytes), s)\n\t}\n}", "func cString(data []byte) (s string, orig string) {\n\t// Find 0x00 byte:\n\tfor i, ch := range data {\n\t\tif ch == 0 {\n\t\t\tdata = data[:i] // excludes terminating 0x00\n\n\t\t\tif !utf8.Valid(data) {\n\t\t\t\t// Try korean\n\t\t\t\tif krdata, err := koreanDecoder.Bytes(data); err == nil {\n\t\t\t\t\treturn string(krdata), string(data)\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak // Either UTF-8 or custom decoding failed\n\t\t}\n\t}\n\n\t// Return data as string.\n\t// We end up here if:\n\t// - no terminating 0 char found,\n\t// - or string is valid UTF-8,\n\t// - or it is invalid but custom decoding failed\n\t// Either way:\n\ts = string(data)\n\treturn s, s\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates settings for a FlexMatch matchmaking configuration. These changes affect all matches and game sessions that are created after the update. To update settings, specify the configuration name to be updated and provide the new settings. Learn more: Design a FlexMatch matchmaker.
func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) { if params == nil { params = &UpdateMatchmakingConfigurationInput{} } result, metadata, err := c.invokeOperation(ctx, "UpdateMatchmakingConfiguration", params, optFns, c.addOperationUpdateMatchmakingConfigurationMiddlewares) if err != nil { return nil, err } out := result.(*UpdateMatchmakingConfigurationOutput) out.ResultMetadata = metadata return out, nil }
[ "func (runner *McRunner) applySettings() {\n\tpropPath := filepath.Join(McServerPath(), \"server.properties\")\n\tprops, err := ioutil.ReadFile(propPath)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tnameExp, _ := regexp.Compile(\"displayname=.*\\\\n\")\n\tmotdExp, _ := regexp.Compile(\"motd=.*\\\\n\")\n\tmaxPlayersExp, _ := regexp.Compile(\"max-players=.*\\\\n\")\n\tportExp, _ := regexp.Compile(\"server-port=.*\\\\n\")\n\n\tname := fmt.Sprintf(\"displayname=%s\\n\", runner.Settings.Name)\n\tmotd := fmt.Sprintf(\"motd=%s\\n\", runner.Settings.MOTD)\n\tmaxPlayers := fmt.Sprintf(\"max-players=%d\\n\", runner.Settings.MaxPlayers)\n\tport := fmt.Sprintf(\"server-port=%d\\n\", runner.Settings.Port)\n\n\tnewProps := strings.Replace(string(props), nameExp.FindString(string(props)), name, 1)\n\tnewProps = strings.Replace(newProps, motdExp.FindString(newProps), motd, 1)\n\tnewProps = strings.Replace(newProps, maxPlayersExp.FindString(newProps), maxPlayers, 1)\n\tnewProps = strings.Replace(newProps, portExp.FindString(newProps), port, 1)\n\n\terr = ioutil.WriteFile(propPath, []byte(newProps), 0644)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}", "func (fm *FakeManager) UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) {\n\tpanic(panicMsg)\n}", "func (x *Rest) ConfigurationUpdate(w http.ResponseWriter, r *http.Request,\n\tparams httprouter.Params) {\n\tdefer panicCatcher(w)\n\n\trequest := msg.New(r, params)\n\trequest.Section = msg.SectionConfiguration\n\trequest.Action = msg.ActionUpdate\n\n\tswitch request.Version {\n\tcase msg.ProtocolOne:\n\t\tcReq := &v1.ConfigurationItem{}\n\t\tif err := decodeJSONBody(r, cReq); err != nil {\n\t\t\tx.replyUnprocessableEntity(&w, &request, err)\n\t\t\treturn\n\t\t}\n\t\trequest.Configuration = v2.ConfigurationFromV1(cReq)\n\n\tcase msg.ProtocolTwo:\n\t\tcReq := v2.NewConfigurationRequest()\n\t\tif err := decodeJSONBody(r, &cReq); err != nil {\n\t\t\tx.replyUnprocessableEntity(&w, &request, err)\n\t\t\treturn\n\t\t}\n\t\trequest.Configuration = *cReq.Configuration\n\n\t\t// only the v2 API has request flags\n\t\tif err := resolveFlags(&cReq, &request); err != nil {\n\t\t\tx.replyBadRequest(&w, &request, err)\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t\tx.replyInternalError(&w, &request, nil)\n\t\treturn\n\t}\n\n\trequest.Configuration.InputSanatize()\n\trequest.LookupHash = calculateLookupID(\n\t\trequest.Configuration.HostID,\n\t\trequest.Configuration.Metric,\n\t)\n\trequest.Configuration.LookupID = request.LookupHash\n\n\tif request.Configuration.ID != strings.ToLower(params.ByName(`ID`)) {\n\t\tx.replyBadRequest(&w, &request, fmt.Errorf(\n\t\t\t\"Mismatched IDs in update: [%s] vs [%s]\",\n\t\t\trequest.Configuration.ID,\n\t\t\tstrings.ToLower(params.ByName(`ID`)),\n\t\t))\n\t}\n\n\tif _, err := uuid.FromString(request.Configuration.ID); err != nil {\n\t\tx.replyBadRequest(&w, &request, err)\n\t\treturn\n\t}\n\n\tx.somaSetFeedbackURL(&request)\n\n\tif !x.isAuthorized(&request) {\n\t\tx.replyForbidden(&w, &request, nil)\n\t\treturn\n\t}\n\n\thandler := x.handlerMap.Get(`configuration_w`)\n\thandler.Intake() <- request\n\tresult := <-request.Reply\n\tx.respond(&w, &result)\n}", "func (o Options) UpdateConfig(cfg engine.Interface) error {\n\truntimes := operator.GetRuntimes(\n\t\toperator.WithNvidiaRuntimeName(o.RuntimeName),\n\t\toperator.WithSetAsDefault(o.SetAsDefault),\n\t\toperator.WithRoot(o.RuntimeDir),\n\t)\n\tfor name, runtime := range runtimes 
{\n\t\terr := cfg.AddRuntime(name, runtime.Path, runtime.SetAsDefault)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to update runtime %q: %v\", name, err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func UpdateConfig(config map[string]interface{}, runtimeName string, runtimePath string, setAsDefault bool) error {\n\t// Read the existing runtimes\n\truntimes := make(map[string]interface{})\n\tif _, exists := config[\"runtimes\"]; exists {\n\t\truntimes = config[\"runtimes\"].(map[string]interface{})\n\t}\n\n\t// Add / update the runtime definitions\n\truntimes[runtimeName] = map[string]interface{}{\n\t\t\"path\": runtimePath,\n\t\t\"args\": []string{},\n\t}\n\n\t// Update the runtimes definition\n\tif len(runtimes) > 0 {\n\t\tconfig[\"runtimes\"] = runtimes\n\t}\n\n\tif setAsDefault {\n\t\tconfig[\"default-runtime\"] = runtimeName\n\t}\n\n\treturn nil\n}", "func (*XMLDocument) UpdateSettings() {\n\tmacro.Rewrite(\"$_.updateSettings()\")\n}", "func (k *KeyGC) UpdateSettings(maxRequestedTTL time.Duration, keysPerAccount int, tx storage.Tx) error {\n\tkeyTTL := timeutil.KeyTTL(maxRequestedTTL, keysPerAccount)\n\tsettings := &pb.Process_Params{\n\t\tIntParams: map[string]int64{\n\t\t\t\"maxRequestedTtl\": int64(maxRequestedTTL.Seconds()),\n\t\t\t\"keysPerAccount\": int64(keysPerAccount),\n\t\t\t\"keyTtl\": int64(keyTTL.Seconds()),\n\t\t},\n\t}\n\tscheduleFrequency := keyTTL / 10\n\tif scheduleFrequency > maxKeyScheduleFrequency {\n\t\tscheduleFrequency = maxKeyScheduleFrequency\n\t}\n\treturn k.process.UpdateSettings(scheduleFrequency, settings, tx)\n}", "func setConfig(fname string, request url.URL) error {\n\tq := request.Query()\n\tname := q.Get(\"config\")\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"invalid config name\")\n\t}\n\tcfg := currentConfig()\n\tif err := cfg.applyURL(q); err != nil {\n\t\treturn err\n\t}\n\treturn editSettings(fname, func(s *settings) error {\n\t\tfor i, c := range s.Configs {\n\t\t\tif c.Name == name {\n\t\t\t\ts.Configs[i].config = cfg\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\ts.Configs = append(s.Configs, namedConfig{Name: name, config: cfg})\n\t\treturn nil\n\t})\n}", "func updateConfig(w http.ResponseWriter, r *http.Request, updateUrl string) {\n\tnewGenTimeout, _ := strconv.Atoi(r.FormValue(\"generate_timeout\"))\n\tif newGenTimeout > 0 {\n\t\tchain.GetServerChain().SetGenerationTimeout(newGenTimeout)\n\t\tviper.Set(\"server_chain.block.generation.timeout\", newGenTimeout)\n\t}\n\tnewTxnWaitTime, _ := strconv.Atoi(r.FormValue(\"txn_wait_time\"))\n\tif newTxnWaitTime > 0 {\n\t\tchain.GetServerChain().SetRetryWaitTime(newTxnWaitTime)\n\t\tviper.Set(\"server_chain.block.generation.retry_wait_time\", newTxnWaitTime)\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html;charset=UTF-8\")\n\tfmt.Fprintf(w, \"<form action='%s' method='post'>\", updateUrl)\n\tfmt.Fprintf(w, \"Generation Timeout (time till a miner makes a block with less than max blocksize): <input type='text' name='generate_timeout' value='%v'><br>\", viper.Get(\"server_chain.block.generation.timeout\"))\n\tfmt.Fprintf(w, \"Retry Wait Time (time miner waits if there aren't enough transactions to reach max blocksize): <input type='text' name='txn_wait_time' value='%v'><br>\", viper.Get(\"server_chain.block.generation.retry_wait_time\"))\n\tfmt.Fprintf(w, \"<input type='submit' value='Submit'>\")\n\tfmt.Fprintf(w, \"</form>\")\n}", "func NewMatchConfig() *MatchConfig {\n\treturn &MatchConfig{\n\t\tInputQueueSize: 128,\n\t\tCallQueueSize: 128,\n\t\tJoinAttemptQueueSize: 128,\n\t\tDeferredQueueSize: 
128,\n\t\tJoinMarkerDeadlineMs: 15000,\n\t\tMaxEmptySec: 0,\n\t}\n}", "func (h *WLSHandler) UpdateConfiguration(config *config.Configuration) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\th.targetConfig = config\n}", "func updateConfig(config *rainsd.Config) {\n\tif rootCmd.Flag(\"rootZonePublicKeyPath\").Changed {\n\t\tconfig.RootZonePublicKeyPath = rootZonePublicKeyPath\n\t}\n\tif rootCmd.Flag(\"assertionCheckPointInterval\").Changed {\n\t\tconfig.AssertionCheckPointInterval = assertionCheckPointInterval\n\t}\n\tif rootCmd.Flag(\"negAssertionCheckPointInterval\").Changed {\n\t\tconfig.NegAssertionCheckPointInterval = negAssertionCheckPointInterval\n\t}\n\tif rootCmd.Flag(\"zoneKeyCheckPointInterval\").Changed {\n\t\tconfig.ZoneKeyCheckPointInterval = zoneKeyCheckPointInterval\n\t}\n\tif rootCmd.Flag(\"checkPointPath\").Changed {\n\t\tconfig.CheckPointPath = checkPointPath\n\t}\n\tif rootCmd.Flag(\"preLoadCaches\").Changed {\n\t\tconfig.PreLoadCaches = preLoadCaches\n\t}\n\tif rootCmd.Flag(\"serverAddress\").Changed {\n\t\tconfig.ServerAddress = serverAddress.value\n\t}\n\tif rootCmd.Flag(\"maxConnections\").Changed {\n\t\tconfig.MaxConnections = maxConnections\n\t}\n\tif rootCmd.Flag(\"keepAlivePeriod\").Changed {\n\t\tconfig.KeepAlivePeriod = keepAlivePeriod\n\t}\n\tif rootCmd.Flag(\"tcpTimeout\").Changed {\n\t\tconfig.TCPTimeout = tcpTimeout\n\t}\n\tif rootCmd.Flag(\"tlsCertificateFile\").Changed {\n\t\tconfig.TLSCertificateFile = tlsCertificateFile\n\t}\n\tif rootCmd.Flag(\"tlsPrivateKeyFile\").Changed {\n\t\tconfig.TLSPrivateKeyFile = tlsPrivateKeyFile\n\t}\n\tif rootCmd.Flag(\"prioBufferSize\").Changed {\n\t\tconfig.PrioBufferSize = prioBufferSize\n\t}\n\tif rootCmd.Flag(\"normalBufferSize\").Changed {\n\t\tconfig.NormalBufferSize = normalBufferSize\n\t}\n\tif rootCmd.Flag(\"notificationBufferSize\").Changed {\n\t\tconfig.NotificationBufferSize = notificationBufferSize\n\t}\n\tif rootCmd.Flag(\"prioWorkerCount\").Changed {\n\t\tconfig.PrioWorkerCount = prioWorkerCount\n\t}\n\tif rootCmd.Flag(\"normalWorkerCount\").Changed {\n\t\tconfig.NormalWorkerCount = normalWorkerCount\n\t}\n\tif rootCmd.Flag(\"notificationWorkerCount\").Changed {\n\t\tconfig.NotificationWorkerCount = notificationWorkerCount\n\t}\n\tif rootCmd.Flag(\"capabilitiesCacheSize\").Changed {\n\t\tconfig.CapabilitiesCacheSize = capabilitiesCacheSize\n\t}\n\tif rootCmd.Flag(\"capabilities\").Changed {\n\t\tconfig.Capabilities = []message.Capability{message.Capability(capabilities)}\n\t}\n\tif rootCmd.Flag(\"zoneKeyCacheSize\").Changed {\n\t\tconfig.ZoneKeyCacheSize = zoneKeyCacheSize\n\t}\n\tif rootCmd.Flag(\"zoneKeyCacheWarnSize\").Changed {\n\t\tconfig.ZoneKeyCacheWarnSize = zoneKeyCacheWarnSize\n\t}\n\tif rootCmd.Flag(\"maxPublicKeysPerZone\").Changed {\n\t\tconfig.MaxPublicKeysPerZone = maxPublicKeysPerZone\n\t}\n\tif rootCmd.Flag(\"pendingKeyCacheSize\").Changed {\n\t\tconfig.PendingKeyCacheSize = pendingKeyCacheSize\n\t}\n\tif rootCmd.Flag(\"delegationQueryValidity\").Changed {\n\t\tconfig.DelegationQueryValidity = delegationQueryValidity\n\t}\n\tif rootCmd.Flag(\"reapZoneKeyCacheInterval\").Changed {\n\t\tconfig.ReapZoneKeyCacheInterval = reapZoneKeyCacheInterval\n\t}\n\tif rootCmd.Flag(\"reapPendingKeyCacheInterval\").Changed {\n\t\tconfig.ReapPendingKeyCacheInterval = reapPendingKeyCacheInterval\n\t}\n\tif rootCmd.Flag(\"assertionCacheSize\").Changed {\n\t\tconfig.AssertionCacheSize = assertionCacheSize\n\t}\n\tif rootCmd.Flag(\"negativeAssertionCacheSize\").Changed {\n\t\tconfig.NegativeAssertionCacheSize = 
negativeAssertionCacheSize\n\t}\n\tif rootCmd.Flag(\"pendingQueryCacheSize\").Changed {\n\t\tconfig.PendingQueryCacheSize = pendingQueryCacheSize\n\t}\n\tif rootCmd.Flag(\"authorities\").Changed {\n\t\tconfig.Authorities = authorities.value\n\t}\n\tif rootCmd.Flag(\"maxAssertionValidity\").Changed {\n\t\tconfig.MaxCacheValidity.AssertionValidity = maxAssertionValidity\n\t}\n\tif rootCmd.Flag(\"maxShardValidity\").Changed {\n\t\tconfig.MaxCacheValidity.ShardValidity = maxShardValidity\n\t}\n\tif rootCmd.Flag(\"maxPshardValidity\").Changed {\n\t\tconfig.MaxCacheValidity.PshardValidity = maxPshardValidity\n\t}\n\tif rootCmd.Flag(\"maxZoneValidity\").Changed {\n\t\tconfig.MaxCacheValidity.ZoneValidity = maxZoneValidity\n\t}\n\tif rootCmd.Flag(\"reapAssertionCacheInterval\").Changed {\n\t\tconfig.ReapAssertionCacheInterval = reapAssertionCacheInterval\n\t}\n\tif rootCmd.Flag(\"reapNegAssertionCacheInterval\").Changed {\n\t\tconfig.ReapNegAssertionCacheInterval = reapNegAssertionCacheInterval\n\t}\n\tif rootCmd.Flag(\"reapPendingQCacheInterval\").Changed {\n\t\tconfig.ReapPendingQCacheInterval = reapPendingQCacheInterval\n\t}\n}", "func UpdateSetting(pool *redis.Pool, registry *pubsub.Registry) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tchannel := chi.URLParam(r, \"channel\")\n\t\tsetting := chi.URLParam(r, \"setting\")\n\n\t\tconn := pool.Get()\n\t\tdefer func() { _ = conn.Close() }()\n\n\t\t// Load the existing settings for the channel so that we can apply the\n\t\t// updates to them.\n\t\tsettings, err := GetSettings(conn, channel)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"unable to load spelling bee settings for channel %s: %+v\", channel, err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Apply the update to the settings in memory.\n\t\tvar shouldRebuildWordMap bool\n\t\tswitch setting {\n\t\tcase \"allow_unofficial_answers\":\n\t\t\tvar value bool\n\t\t\tif err := render.DecodeJSON(r.Body, &value); err != nil {\n\t\t\t\tlog.Printf(\"unable to parse spelling bee allow unofficial answers setting json %v: %+v\", value, err)\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsettings.AllowUnofficialAnswers = value\n\t\t\tshouldRebuildWordMap = true\n\n\t\tcase \"font_size\":\n\t\t\tvar value model.FontSize\n\t\t\tif err := render.DecodeJSON(r.Body, &value); err != nil {\n\t\t\t\tlog.Printf(\"unable to parse spelling bee font size setting json %s: %+v\", value, err)\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsettings.FontSize = value\n\n\t\tcase \"show_answer_placeholders\":\n\t\t\tvar value bool\n\t\t\tif err := render.DecodeJSON(r.Body, &value); err != nil {\n\t\t\t\tlog.Printf(\"unable to parse spelling bee show answer placeholders setting json %v: %+v\", value, err)\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsettings.ShowAnswerPlaceholders = value\n\n\t\tdefault:\n\t\t\tlog.Printf(\"unrecognized spelling bee setting name %s\", setting)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Save the settings back to the database.\n\t\tif err = SetSettings(conn, channel, settings); err != nil {\n\t\t\tlog.Printf(\"unable to save spelling bee settings for channel %s: %+v\", channel, err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Load the state and rebuild the word map if we changed a setting that\n\t\t// requires this. 
We do this after the setting is applied so that if there\n\t\t// was an error earlier we don't modify the solve's state.\n\t\tvar updatedState *State\n\t\tif shouldRebuildWordMap {\n\t\t\tstate, err := GetState(conn, channel)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"unable to load state for channel %s: %+v\", channel, err)\n\t\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// There's no need to update cells if the puzzle hasn't been selected or\n\t\t\t// started or is already complete.\n\t\t\tstatus := state.Status\n\t\t\tif status != model.StatusCreated && status != model.StatusSelected && status != model.StatusComplete {\n\t\t\t\tstate.RebuildWordMap(settings.AllowUnofficialAnswers)\n\n\t\t\t\t// We may have just solved the puzzle -- if so then we should stop the\n\t\t\t\t// timer before saving the state.\n\t\t\t\tif state.Status == model.StatusComplete {\n\t\t\t\t\tnow := time.Now()\n\t\t\t\t\ttotal := state.TotalSolveDuration.Nanoseconds() + now.Sub(*state.LastStartTime).Nanoseconds()\n\t\t\t\t\tstate.LastStartTime = nil\n\t\t\t\t\tstate.TotalSolveDuration = model.Duration{Duration: time.Duration(total)}\n\t\t\t\t}\n\n\t\t\t\tif err := SetState(conn, channel, state); err != nil {\n\t\t\t\t\tlog.Printf(\"unable to save state for channel %s: %+v\", channel, err)\n\t\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tupdatedState = &state\n\t\t\t}\n\t\t}\n\n\t\t// Now broadcast the new settings to all of the clients in the channel.\n\t\tregistry.Publish(ChannelID(channel), SettingsEvent(settings))\n\n\t\tif updatedState != nil {\n\t\t\t// Broadcast the updated state to all of the clients, making sure to not\n\t\t\t// include the answers.\n\t\t\tupdatedState.Puzzle = updatedState.Puzzle.WithoutAnswers()\n\n\t\t\tregistry.Publish(ChannelID(channel), StateEvent(*updatedState))\n\n\t\t\t// Since we updated the state, we may have also just solved the puzzle.\n\t\t\t// If we did then we should also send a complete message.\n\t\t\tif updatedState.Status == model.StatusComplete {\n\t\t\t\tregistry.Publish(ChannelID(channel), CompleteEvent())\n\t\t\t}\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n}", "func (b *Backend) ApplyConfiguration(config gw.GatewayConfiguration) error {\n\tfor i := range config.Channels {\n\t\tloRaModConfig := config.Channels[i].GetLoraModulationConfig()\n\t\tif loRaModConfig != nil {\n\t\t\tloRaModConfig.Bandwidth = loRaModConfig.Bandwidth * 1000\n\t\t}\n\n\t\tfskModConfig := config.Channels[i].GetFskModulationConfig()\n\t\tif fskModConfig != nil {\n\t\t\tfskModConfig.Bandwidth = fskModConfig.Bandwidth * 1000\n\t\t}\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"version\": config.Version,\n\t}).Info(\"backend/concentratord: forwarding configuration command\")\n\n\t_, err := b.commandRequest(\"config\", &config)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"backend/concentratord: send configuration command error\")\n\t}\n\n\tcommandCounter(\"config\").Inc()\n\n\treturn nil\n}", "func (api *API) UpdateConfig(request *restful.Request, response *restful.Response) {\n\n\t// ToDo: check url name matches body name\n\n\tparams := request.PathParameters()\n\tk, err := setup(params)\n\tif err != nil {\n\t\tapi.writeError(http.StatusBadRequest, err.Error(), response)\n\t\treturn\n\t}\n\n\tconfig := &Config{}\n\tif err = request.ReadEntity(config); err != nil {\n\t\tapi.writeError(http.StatusBadRequest, err.Error(), response)\n\t\treturn\n\t}\n\n\tif err = config.ParseSpec(); err != nil 
{\n\t\tapi.writeError(http.StatusBadRequest, err.Error(), response)\n\t\treturn\n\t}\n\n\tglog.V(2).Infof(\"Updating config in Istio registry: key %+v, config %+v\", k, config)\n\n\t// TODO: incorrect use with new registry\n\tif _, err = api.registry.Put(config.ParsedSpec, \"\"); err != nil {\n\t\tswitch err.(type) {\n\t\tcase *model.ItemNotFoundError:\n\t\t\tapi.writeError(http.StatusNotFound, err.Error(), response)\n\t\tdefault:\n\t\t\tapi.writeError(http.StatusInternalServerError, err.Error(), response)\n\t\t}\n\t\treturn\n\t}\n\tglog.V(2).Infof(\"Updated config to %+v\", config)\n\tif err = response.WriteHeaderAndEntity(http.StatusOK, config); err != nil {\n\t\tapi.writeError(http.StatusInternalServerError, err.Error(), response)\n\t}\n}", "func (settings *Settings) Update(newSettings *Settings) {\n\tif settings == nil {\n\t\treturn\n\t}\n\tif newSettings.ViewsPath == \"\" {\n\t\tsettings.ViewsPath = \".\"\n\t} else {\n\t\tsettings.ViewsPath = newSettings.ViewsPath\n\t}\n\tif newSettings.StaticPath == \"\" {\n\t\tsettings.StaticPath = \".\"\n\t} else {\n\t\tsettings.StaticPath = newSettings.StaticPath\n\t}\n\tif newSettings.ViewExtension == \"\" {\n\t\tsettings.ViewExtension = \"html\"\n\t} else {\n\t\tsettings.ViewExtension = newSettings.ViewExtension\n\t}\n}", "func (sys *SystemInstance) UpdateProjectConfiguration(projectID int, presetID int, engineConfigurationID string) error {\n\tengineConfigID, _ := strconv.Atoi(engineConfigurationID)\n\n\tvar projectScanSettings ScanSettings\n\theader := http.Header{}\n\theader.Set(\"Content-Type\", \"application/json\")\n\tdata, err := sendRequest(sys, http.MethodGet, fmt.Sprintf(\"/sast/scanSettings/%v\", projectID), nil, header)\n\tif err != nil {\n\t\t// if an error happens, try to update the config anyway\n\t\tsys.logger.Warnf(\"Failed to fetch scan settings of project %v: %s\", projectID, err)\n\t} else {\n\t\t// Check if the current project config needs to be updated\n\t\tjson.Unmarshal(data, &projectScanSettings)\n\t\tif projectScanSettings.Preset.PresetID == presetID && (projectScanSettings.EngineConfiguration.EngineConfigurationID == engineConfigID || engineConfigID == 0) {\n\t\t\tsys.logger.Debugf(\"Project configuration does not need to be updated\")\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// use the project-level value to configure the project if no value was provided in piper config\n\tif engineConfigID == 0 {\n\t\tengineConfigID = projectScanSettings.EngineConfiguration.EngineConfigurationID\n\t}\n\n\tjsonData := map[string]interface{}{\n\t\t\"projectId\": projectID,\n\t\t\"presetId\": presetID,\n\t\t\"engineConfigurationId\": engineConfigID,\n\t}\n\n\tjsonValue, err := json.Marshal(jsonData)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"error marshalling project data\")\n\t}\n\n\t_, err = sendRequest(sys, http.MethodPost, \"/sast/scanSettings\", bytes.NewBuffer(jsonValue), header)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"request to checkmarx system failed\")\n\t}\n\tsys.logger.Debugf(\"Project configuration updated\")\n\n\treturn nil\n}", "func Update(section, option, value string) {\n\tcfg.Update(section, option, value)\n}", "func (client AppsClient) UpdateConfigurationSlotResponder(resp *http.Response) (result SiteConfigResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StartEventSource starts an event source
func (ese *GitlabEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error { defer gateways.Recover(eventSource.Name) log := ese.Log.WithEventSource(eventSource.Name) log.Info("operating on event source") config, err := parseEventSource(eventSource.Data) if err != nil { log.WithError(err).Error("failed to parse event source") return err } gl := config.(*gitlabEventSource) return gwcommon.ProcessRoute(&RouteConfig{ route: &gwcommon.Route{ EventSource: eventSource, Logger: ese.Log, Webhook: gl.Hook, StartCh: make(chan struct{}), }, namespace: ese.Namespace, clientset: ese.Clientset, ges: gl, }, helper, eventStream) }
[ "func (ese *SlackEventSourceExecutor) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tdefer gateways.Recover(eventSource.Name)\n\n\tlog := ese.Log.WithEventSource(eventSource.Name)\n\tlog.Info(\"operating on event source\")\n\n\tconfig, err := parseEventSource(eventSource.Data)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"failed to parse event source\")\n\t\treturn err\n\t}\n\n\tses := config.(*slackEventSource)\n\n\ttoken, err := store.GetSecrets(ese.Clientset, ese.Namespace, ses.Token.Name, ses.Token.Key)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"failed to retrieve token\")\n\t\treturn err\n\t}\n\n\treturn gwcommon.ProcessRoute(&RouteConfig{\n\t\troute: &gwcommon.Route{\n\t\t\tLogger: ese.Log,\n\t\t\tStartCh: make(chan struct{}),\n\t\t\tWebhook: ses.Hook,\n\t\t\tEventSource: eventSource,\n\t\t},\n\t\ttoken: token,\n\t\tclientset: ese.Clientset,\n\t\tnamespace: ese.Namespace,\n\t\tses: ses,\n\t}, helper, eventStream)\n}", "func (listener *EventListener) StartEventSource(eventSource *gateways.EventSource, eventStream gateways.Eventing_StartEventSourceServer) error {\n\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).Infoln(\"started processing the event source...\")\n\n\tchannels := server.NewChannels()\n\n\tgo server.HandleEventsFromEventSource(eventSource.Name, eventStream, channels, listener.Logger)\n\n\tdefer func() {\n\t\tchannels.Stop <- struct{}{}\n\t}()\n\n\tif err := listener.listenEvents(eventSource, channels); err != nil {\n\t\tlistener.Logger.WithField(common.LabelEventSource, eventSource.Name).WithError(err).Errorln(\"failed to listen to events\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *SourceControl) Start(sourceName *string, reply *bool) error {\n\t*reply = false\n\tif s.isSourceActive {\n\t\treturn fmt.Errorf(\"already have active source, do not start\")\n\t}\n\tname := strings.ToUpper(*sourceName)\n\tswitch name {\n\tcase \"SIMPULSESOURCE\":\n\t\ts.ActiveSource = DataSource(s.simPulses)\n\t\ts.status.SourceName = \"SimPulses\"\n\n\tcase \"TRIANGLESOURCE\":\n\t\ts.ActiveSource = DataSource(s.triangle)\n\t\ts.status.SourceName = \"Triangles\"\n\n\tcase \"LANCEROSOURCE\":\n\t\ts.ActiveSource = DataSource(s.lancero)\n\t\ts.status.SourceName = \"Lancero\"\n\n\tcase \"ROACHSOURCE\":\n\t\ts.ActiveSource = DataSource(s.roach)\n\t\ts.status.SourceName = \"Roach\"\n\n\tcase \"ABACOSOURCE\":\n\t\ts.ActiveSource = DataSource(s.abaco)\n\t\ts.status.SourceName = \"Abaco\"\n\n\tcase \"ERRORINGSOURCE\":\n\t\ts.ActiveSource = DataSource(s.erroring)\n\t\ts.status.SourceName = \"Erroring\"\n\n\tdefault:\n\t\treturn fmt.Errorf(\"data Source \\\"%s\\\" is not recognized\", *sourceName)\n\t}\n\n\tlog.Printf(\"Starting data source named %s\\n\", *sourceName)\n\ts.status.Running = true\n\tif err := Start(s.ActiveSource, s.queuedRequests, s.status.Npresamp, s.status.Nsamples); err != nil {\n\t\ts.status.Running = false\n\t\ts.isSourceActive = false\n\t\treturn err\n\t}\n\ts.isSourceActive = true\n\ts.status.SamplePeriod = s.ActiveSource.SamplePeriod()\n\ts.status.Nchannels = s.ActiveSource.Nchan()\n\ts.status.ChanGroups = s.ActiveSource.ChanGroups()\n\ts.broadcastStatus()\n\ts.broadcastTriggerState()\n\ts.broadcastGroupTriggerState()\n\ts.broadcastChannelNames()\n\ts.storeChannelGroups()\n\t*reply = true\n\treturn nil\n}", "func (es *EventStream) Start() {\n\tif es.Events == nil {\n\t\tes.Events = make(chan []Event)\n\t}\n\n\t// register eventstream in the local registry for later 
lookup\n\t// in C callback\n\tcbInfo := registry.Add(es)\n\tes.registryID = cbInfo\n\tes.uuid = GetDeviceUUID(es.Device)\n\tes.start(es.Paths, cbInfo)\n}", "func newEventSource() *v1alpha1.AWSSQSSource {\n\tsrc := &v1alpha1.AWSSQSSource{\n\t\tSpec: v1alpha1.AWSSQSSourceSpec{\n\t\t\tARN: NewARN(sqs.ServiceName, \"triggermeshtest\"),\n\t\t},\n\t}\n\n\tPopulate(src)\n\n\treturn src\n}", "func newEventSource(opts ...sourceOption) *v1alpha1.AWSSNSSource {\n\tsrc := &v1alpha1.AWSSNSSource{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: tNs,\n\t\t\tName: tName,\n\t\t},\n\t\tStatus: v1alpha1.AWSSNSSourceStatus{\n\t\t\tStatus: commonv1alpha1.Status{\n\t\t\t\tSourceStatus: duckv1.SourceStatus{\n\t\t\t\t\tSinkURI: tSinkURI,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// *reconcilerImpl.Reconcile calls this method before any reconciliation loop. Calling it here ensures that the\n\t// object is initialized in the same manner, and prevents tests from wrongly reporting unexpected status updates.\n\treconciler.PreProcessReconcile(context.Background(), src)\n\n\tfor _, opt := range opts {\n\t\topt(src)\n\t}\n\n\treturn src\n}", "func (c *Client) StartStreams(symbols []string, handlerGetter api.HandlerGetter) (<-chan struct{}, error) {\n\t// switch c.source {\n\t// case \"live\":\n\t// \treturn nil, fmt.Errorf(\"source %v not yet supported\", c.source)\n\t// case \"paper\":\n\treturn c.startPaperStreams(symbols, handlerGetter)\n\t// default:\n\t// \treturn c.simClient.StartLogfileStreams(c.source, symbols, handlerGetter)\n\t// }\n}", "func (s *stream) Start() error {\n\tif s.ref != nilstream {\n\t\treturn nil\n\t}\n\tp := C.CFStringCreateWithCStringNoCopy(C.kCFAllocatorDefault, C.CString(s.path), C.kCFStringEncodingUTF8, C.kCFAllocatorDefault)\n\tpath := C.CFArrayCreate(C.kCFAllocatorDefault, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil)\n\tctx := C.FSEventStreamContext{}\n\tref := C.EventStreamCreate(&ctx, C.uintptr_t(s.info), path, C.FSEventStreamEventId(atomic.LoadUint64(&since)), latency, flags)\n\tif ref == nilstream {\n\t\treturn errCreate\n\t}\n\tC.FSEventStreamSetDispatchQueue(ref, q)\n\tif C.FSEventStreamStart(ref) == C.Boolean(0) {\n\t\tC.FSEventStreamInvalidate(ref)\n\t\treturn errStart\n\t}\n\ts.ref = ref\n\treturn nil\n}", "func NewStreamStartEvent(stream_id string) *Event {\n\ts := (*C.gchar)(C.CString(stream_id))\n\tdefer C.free(unsafe.Pointer(s))\n\tr := new(Event)\n\tr.SetPtr(glib.Pointer(C.gst_event_new_stream_start(s)))\n\treturn r\n}", "func (s *Basegff3Listener) EnterSource(ctx *SourceContext) {}", "func (s *EventStore) StartTracing() {\n\ts.traceMu.Lock()\n\tdefer s.traceMu.Unlock()\n\n\ts.tracing = true\n}", "func (s *Scheduler) setSource() {\n\tif s.logSource != nil {\n\t\ts.sourceMgr.RemoveSource(s.logSource)\n\t}\n\n\ts.logSource = sources.NewLogSource(s.sourceName, &config.LogsConfig{\n\t\tType: config.StringChannelType,\n\t\tSource: s.source,\n\t\tChannel: s.logsChan,\n\t})\n\ts.sourceMgr.AddSource(s.logSource)\n}", "func (client *cloudClient) startTraceEventCollector(serviceID akid.ServiceID, loggingOptions daemon.LoggingOptions) {\n\tserviceInfo, traceInfo := client.getInfo(serviceID, loggingOptions.TraceID)\n\tif serviceInfo == nil {\n\t\tprinter.Warningf(\"Got a new trace from the cloud for an unregistered service: %q\\n\", akid.String(serviceID))\n\t\treturn\n\t}\n\n\tif traceInfo != nil {\n\t\tif traceInfo.active {\n\t\t\tprinter.Debugf(\"Got an allegedly new trace from the cloud, but already collecting events for that trace: %q\\n\", 
akid.String(loggingOptions.TraceID))\n\t\t}\n\n\t\t// Reactivate the trace and update its logging options.\n\t\ttraceInfo.active = true\n\t\ttraceInfo.loggingOptions = loggingOptions\n\t\treturn\n\t}\n\n\t// Start a collector goroutine.\n\ttraceEventChannel := make(chan *TraceEvent, TRACE_BUFFER_SIZE)\n\tgo collectTraces(traceEventChannel, serviceInfo.learnClient, serviceID, loggingOptions, client.plugins)\n\n\t// Register the newly discovered trace.\n\tserviceInfo.traces[loggingOptions.TraceID] = newTraceInfo(loggingOptions, traceEventChannel)\n}", "func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error {\n\tlog := logging.FromContext(ctx).\n\t\tWith(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName())\n\tlog.Info(\"started processing the Pulsar event source...\")\n\tdefer sources.Recover(el.GetEventName())\n\n\tmsgChannel := make(chan pulsar.ConsumerMessage)\n\n\tpulsarEventSource := &el.PulsarEventSource\n\n\tsubscriptionType := pulsar.Exclusive\n\tif pulsarEventSource.Type == \"shared\" {\n\t\tsubscriptionType = pulsar.Shared\n\t}\n\n\tlog.Info(\"setting consumer options...\")\n\tconsumerOpt := pulsar.ConsumerOptions{\n\t\tTopics: pulsarEventSource.Topics,\n\t\tSubscriptionName: el.EventName,\n\t\tType: subscriptionType,\n\t\tMessageChannel: msgChannel,\n\t}\n\n\tlog.Info(\"setting client options...\")\n\tvar err error\n\ttlsTrustCertsFilePath := \"\"\n\tif pulsarEventSource.TLSTrustCertsSecret != nil {\n\t\ttlsTrustCertsFilePath, err = common.GetSecretVolumePath(pulsarEventSource.TLSTrustCertsSecret)\n\t\tif err != nil {\n\t\t\tlog.Errorw(\"failed to get TLSTrustCertsFilePath from the volume\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\t}\n\tclientOpt := pulsar.ClientOptions{\n\t\tURL: pulsarEventSource.URL,\n\t\tTLSTrustCertsFilePath: tlsTrustCertsFilePath,\n\t\tTLSAllowInsecureConnection: pulsarEventSource.TLSAllowInsecureConnection,\n\t\tTLSValidateHostname: pulsarEventSource.TLSValidateHostname,\n\t}\n\n\tif pulsarEventSource.AuthTokenSecret != nil {\n\t\ttoken, err := common.GetSecretFromVolume(pulsarEventSource.AuthTokenSecret)\n\t\tif err != nil {\n\t\t\tlog.Errorw(\"failed to get AuthTokenSecret from the volume\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\t\tclientOpt.Authentication = pulsar.NewAuthenticationToken(token)\n\t}\n\n\tif len(pulsarEventSource.AuthAthenzParams) > 0 && pulsarEventSource.AuthAthenzSecret != nil {\n\t\tlog.Info(\"setting athenz auth option...\")\n\t\tauthAthenzFilePath, err := common.GetSecretVolumePath(pulsarEventSource.AuthAthenzSecret)\n\t\tif err != nil {\n\t\t\tlog.Errorw(\"failed to get authAthenzSecret from the volume\", zap.Error(err))\n\t\t\treturn err\n\t\t}\n\t\tpulsarEventSource.AuthAthenzParams[\"privateKey\"] = \"file://\" + authAthenzFilePath\n\t\tclientOpt.Authentication = pulsar.NewAuthenticationAthenz(pulsarEventSource.AuthAthenzParams)\n\t}\n\n\tif pulsarEventSource.TLS != nil {\n\t\tlog.Info(\"setting tls auth option...\")\n\t\tvar clientCertPath, clientKeyPath string\n\t\tswitch {\n\t\tcase pulsarEventSource.TLS.ClientCertSecret != nil && pulsarEventSource.TLS.ClientKeySecret != nil:\n\t\t\tclientCertPath, err = common.GetSecretVolumePath(pulsarEventSource.TLS.ClientCertSecret)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorw(\"failed to get ClientCertPath from the volume\", zap.Error(err))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tclientKeyPath, err = common.GetSecretVolumePath(pulsarEventSource.TLS.ClientKeySecret)\n\t\t\tif 
err != nil {\n\t\t\t\tlog.Errorw(\"failed to get ClientKeyPath from the volume\", zap.Error(err))\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid TLS config\")\n\t\t}\n\t\tclientOpt.Authentication = pulsar.NewAuthenticationTLS(clientCertPath, clientKeyPath)\n\t}\n\n\tvar client pulsar.Client\n\n\tif err := common.DoWithRetry(pulsarEventSource.ConnectionBackoff, func() error {\n\t\tvar err error\n\t\tif client, err = pulsar.NewClient(clientOpt); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to %s for event source %s, %w\", pulsarEventSource.URL, el.GetEventName(), err)\n\t}\n\n\tlog.Info(\"subscribing to messages on the topic...\")\n\tconsumer, err := client.Subscribe(consumerOpt)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to topic %+v for event source %s, %w\", pulsarEventSource.Topics, el.GetEventName(), err)\n\t}\n\nconsumeMessages:\n\tfor {\n\t\tselect {\n\t\tcase msg, ok := <-msgChannel:\n\t\t\tif !ok {\n\t\t\t\tlog.Error(\"failed to read a message, channel might have been closed\")\n\t\t\t\treturn fmt.Errorf(\"channel might have been closed\")\n\t\t\t}\n\n\t\t\tif err := el.handleOne(msg, dispatch, log); err != nil {\n\t\t\t\tlog.Errorw(\"failed to process a Pulsar event\", zap.Error(err))\n\t\t\t\tel.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName())\n\t\t\t}\n\n\t\t\tif err := consumer.Ack(msg.Message); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to process a consumer ack, %w\", err)\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\tconsumer.Close()\n\t\t\tclient.Close()\n\t\t\tbreak consumeMessages\n\t\t}\n\t}\n\n\tlog.Info(\"event source is stopped\")\n\treturn nil\n}", "func startLogEmitter(bus eventbus.EventDispatcher, src source, sinks []sink, done func()) {\n\tif done != nil {\n\t\tdefer done()\n\t}\n\tscanner := bufio.NewScanner(src.in)\n\tfor scanner.Scan() {\n\t\tdata := scanner.Bytes()\n\t\tsrc.q.Add(string(data))\n\n\t\tpayload := LogEvent{\n\t\t\tTimestamp: time.Now().UTC(),\n\t\t\tLine: data,\n\t\t}\n\t\tevt, err := eventbus.NewEvent(LogLine, payload)\n\t\tif err != nil {\n\t\t\tnewError(bus, EventError{fmt.Errorf(\"unable to construct log event: %v\", err)})\n\t\t}\n\t\tbus.Dispatch(evt, LogTopic)\n\n\t\tfor _, s := range sinks {\n\t\t\tif _, err := s.out.Write(append(data, '\\n')); err != nil {\n\t\t\t\tnewError(bus, SinkError{fmt.Errorf(\"error writing to sink %s: %v\", src.name, err)})\n\t\t\t}\n\t\t}\n\t}\n}", "func FromEventSource(ch chan interface{}, opts ...options.Option) Observable {\n\treturn newHotObservableFromChannel(ch, opts...)\n}", "func (s *BaseServer) StartStreams(ctx context.Context, cb EventCallback) {\n\ts.m.RLock()\n\tdefer s.m.RUnlock()\n\tfor key, obj := range s.objects {\n\t\tfor _, stream := range obj.Streams() {\n\t\t\tgo s.streamWorker(ctx, key, stream, cb)\n\t\t}\n\t}\n}", "func newEventSource() *v1alpha1.AWSSNSSource {\n\tsrc := &v1alpha1.AWSSNSSource{\n\t\tSpec: v1alpha1.AWSSNSSourceSpec{\n\t\t\tARN: tTopicARN,\n\t\t\tSubscriptionAttributes: map[string]*string{\n\t\t\t\t\"DeliveryPolicy\": aws.String(`{\"healthyRetryPolicy\":{\"numRetries\":5}}`),\n\t\t\t},\n\t\t},\n\t}\n\n\t// assume finalizer is already set to prevent the generated reconciler\n\t// from generating an extra Patch action\n\tsrc.Finalizers = []string{sources.AWSSNSSourceResource.String()}\n\n\tPopulate(src)\n\n\treturn src\n}", "func (es Streamer) Start(logs chan<- types.EventData, errs chan<- error) {\n\tapps := 
LoadApplications(es.RegistryPath)\n\n\tes.logs = logs\n\tes.errs = errs\n\n\tclient, err := ethclient.Dial(es.WebsocketURL)\n\tif err != nil {\n\t\tes.errs <- err\n\t}\n\n\tchainID, err := client.NetworkID(context.Background())\n\tif err != nil {\n\t\tes.errs <- err\n\t}\n\tlog.Info(fmt.Sprintf(\"Connected to Ethereum chain ID %s\\n\", chainID))\n\n\t// Start application subscriptions\n\tappEvents := make(chan ctypes.Log)\n\tfor _, app := range apps {\n\t\tquery := es.buildSubscriptionFilter(app)\n\n\t\t// Start the contract subscription\n\t\t_, err := client.SubscribeFilterLogs(context.Background(), query, appEvents)\n\t\tif err != nil {\n\t\t\tlog.Info(fmt.Sprintf(\"Failed to subscribe to app %s\\n\", app.ID))\n\t\t} else {\n\t\t\tlog.Info(fmt.Sprintf(\"Subscribed to app %s\\n\", app.ID))\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\t// case err := <-sub.Err(): // TODO: capture subscription errors\n\t\t// \tes.errs <- err\n\t\tcase vLog := <-appEvents:\n\t\t\tlog.Info(fmt.Sprintf(\"Witnessed tx %s on app %s\\n\", vLog.TxHash.Hex(), vLog.Address.Hex()))\n\t\t\teventData := types.NewEventData(vLog.Address, vLog)\n\t\t\tes.logs <- eventData\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
FetchOrganization provides a mock function with given fields: filters
func (_m *OrganizationFetcher) FetchOrganization(filters []services.QueryFilter) (models.Organization, error) { ret := _m.Called(filters) var r0 models.Organization if rf, ok := ret.Get(0).(func([]services.QueryFilter) models.Organization); ok { r0 = rf(filters) } else { r0 = ret.Get(0).(models.Organization) } var r1 error if rf, ok := ret.Get(1).(func([]services.QueryFilter) error); ok { r1 = rf(filters) } else { r1 = ret.Error(1) } return r0, r1 }
[ "func (m *MockOrganizationServiceClient) FetchOrganizationList(arg0 context.Context, arg1 *organization.Empty, arg2 ...grpc.CallOption) (*organization.OrganizationListResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FetchOrganizationList\", varargs...)\n\tret0, _ := ret[0].(*organization.OrganizationListResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockOrganizationServiceClient) FetchUserListByOrganization(arg0 context.Context, arg1 *organization.ByOrganizationRequest, arg2 ...grpc.CallOption) (*organization.UserListResponse, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FetchUserListByOrganization\", varargs...)\n\tret0, _ := ret[0].(*organization.UserListResponse)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockOrganizationService) FindOrganizations(arg0 context.Context, arg1 influxdb.OrganizationFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{arg0, arg1}\n\tfor _, a := range arg2 {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"FindOrganizations\", varargs...)\n\tret0, _ := ret[0].([]*influxdb.Organization)\n\tret1, _ := ret[1].(int)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}", "func (_m *ArticleRepository) Fetch(ctx context.Context, cursor string, num int64) ([]domain.domain, string, error) {\n\tret := _m.Called(ctx, cursor, num)\n\n\tvar r0 []domain.Change\n\tif rf, ok := ret.Get(0).(func(context.Context, string, int64) []domain.Change); ok {\n\t\tr0 = rf(ctx, cursor, num)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]domain.Change)\n\t\t}\n\t}\n\n\tvar r1 string\n\tif rf, ok := ret.Get(1).(func(context.Context, string, int64) string); ok {\n\t\tr1 = rf(ctx, cursor, num)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(context.Context, string, int64) error); ok {\n\t\tr2 = rf(ctx, cursor, num)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *FakePublicDashboardService) FindAll(ctx context.Context, u *user.SignedInUser, orgId int64) ([]models.PublicDashboardListResponse, error) {\n\tret := _m.Called(ctx, u, orgId)\n\n\tvar r0 []models.PublicDashboardListResponse\n\tif rf, ok := ret.Get(0).(func(context.Context, *user.SignedInUser, int64) []models.PublicDashboardListResponse); ok {\n\t\tr0 = rf(ctx, u, orgId)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]models.PublicDashboardListResponse)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(context.Context, *user.SignedInUser, int64) error); ok {\n\t\tr1 = rf(ctx, u, orgId)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (m *MockDatabase) GetOrganizations() (*[]model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizations\")\n\tret0, _ := ret[0].(*[]model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *Client) GetOrganizationFields(arg0 context.Context) ([]zendesk.OrganizationField, zendesk.Page, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationFields\", arg0)\n\tret0, _ := ret[0].([]zendesk.OrganizationField)\n\tret1, _ := ret[1].(zendesk.Page)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, 
ret2\n}", "func queryOrganization(ctx context.Context, r *http.Request, svc platform.OrganizationService) (o *platform.Organization, err error) {\n\tfilter := platform.OrganizationFilter{}\n\tif reqID := r.URL.Query().Get(OrgID); reqID != \"\" {\n\t\tfilter.ID, err = platform.IDFromString(reqID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif name := r.URL.Query().Get(OrgName); name != \"\" {\n\t\tfilter.Name = &name\n\t}\n\n\treturn svc.FindOrganization(ctx, filter)\n}", "func (m *MockDatabase) GetOrganizationByAgolaRef(organizationName string) (*model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationByAgolaRef\", organizationName)\n\tret0, _ := ret[0].(*model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *Forge) Org(ctx context.Context, u *model.User, org string) (*model.Org, error) {\n\tret := _m.Called(ctx, u, org)\n\n\tvar r0 *model.Org\n\tvar r1 error\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.User, string) (*model.Org, error)); ok {\n\t\treturn rf(ctx, u, org)\n\t}\n\tif rf, ok := ret.Get(0).(func(context.Context, *model.User, string) *model.Org); ok {\n\t\tr0 = rf(ctx, u, org)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*model.Org)\n\t\t}\n\t}\n\n\tif rf, ok := ret.Get(1).(func(context.Context, *model.User, string) error); ok {\n\t\tr1 = rf(ctx, u, org)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *OfficialCompanyRepository) GetCompaniesList(_a0 uint, _a1 uint) ([]models.OfficialCompany, error) {\n\tret := _m.Called(_a0, _a1)\n\n\tvar r0 []models.OfficialCompany\n\tif rf, ok := ret.Get(0).(func(uint, uint) []models.OfficialCompany); ok {\n\t\tr0 = rf(_a0, _a1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]models.OfficialCompany)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(uint, uint) error); ok {\n\t\tr1 = rf(_a0, _a1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *Repository) Fetch(ctx context.Context, cursor string, num int64) (res []*models.User, nextCursor string, err error) {\n\tret := _m.Called(ctx, cursor,num)\n\n\tvar r0 []*models.User\n\tif rf, ok := ret.Get(0).(func(context.Context, string,int64) []*models.User); ok {\n\t\tr0 = rf(ctx, cursor,num)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.User)\n\t\t}\n\t}\n\n\tvar r1 string\n\tif rf, ok := ret.Get(1).(func(context.Context, string,int64) string); ok {\n\t\tr1 = rf(ctx, cursor,num)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(context.Context, string,int64) error); ok {\n\t\tr2 = rf(ctx, cursor,num)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1,r2\n}", "func (_m *IUserService) List(tenantId int, page common.Pagination, filters model.UserFilterList) (model.UserDtos, *common.PageResult, error) {\n\tret := _m.Called(tenantId, page, filters)\n\n\tvar r0 model.UserDtos\n\tif rf, ok := ret.Get(0).(func(int, common.Pagination, model.UserFilterList) model.UserDtos); ok {\n\t\tr0 = rf(tenantId, page, filters)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(model.UserDtos)\n\t\t}\n\t}\n\n\tvar r1 *common.PageResult\n\tif rf, ok := ret.Get(1).(func(int, common.Pagination, model.UserFilterList) *common.PageResult); ok {\n\t\tr1 = rf(tenantId, page, filters)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(*common.PageResult)\n\t\t}\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(int, 
common.Pagination, model.UserFilterList) error); ok {\n\t\tr2 = rf(tenantId, page, filters)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (_m *AlbumRepository) Search(includePrivate bool, includeNoMedias bool, limit int, offset int, term string, order string) ([]album.Album, error) {\n\tret := _m.Called(includePrivate, includeNoMedias, limit, offset, term, order)\n\n\tvar r0 []album.Album\n\tif rf, ok := ret.Get(0).(func(bool, bool, int, int, string, string) []album.Album); ok {\n\t\tr0 = rf(includePrivate, includeNoMedias, limit, offset, term, order)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]album.Album)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(bool, bool, int, int, string, string) error); ok {\n\t\tr1 = rf(includePrivate, includeNoMedias, limit, offset, term, order)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (_m *API) SearchPeople(query string, page int) ([]model.PeopleSearch, int, error) {\n\tret := _m.Called(query, page)\n\n\tvar r0 []model.PeopleSearch\n\tif rf, ok := ret.Get(0).(func(string, int) []model.PeopleSearch); ok {\n\t\tr0 = rf(query, page)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.PeopleSearch)\n\t\t}\n\t}\n\n\tvar r1 int\n\tif rf, ok := ret.Get(1).(func(string, int) int); ok {\n\t\tr1 = rf(query, page)\n\t} else {\n\t\tr1 = ret.Get(1).(int)\n\t}\n\n\tvar r2 error\n\tif rf, ok := ret.Get(2).(func(string, int) error); ok {\n\t\tr2 = rf(query, page)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\n\treturn r0, r1, r2\n}", "func (m *MockDatabase) GetOrganizationsByGitSource(gitSource string) (*[]model.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetOrganizationsByGitSource\", gitSource)\n\tret0, _ := ret[0].(*[]model.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockOrganizationService) FindOrganization(arg0 context.Context, arg1 influxdb.OrganizationFilter) (*influxdb.Organization, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FindOrganization\", arg0, arg1)\n\tret0, _ := ret[0].(*influxdb.Organization)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (m *MockOrganizationLister) Organizations(arg0 *mongodbatlas.ListOptions) (*mongodbatlas.Organizations, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Organizations\", arg0)\n\tret0, _ := ret[0].(*mongodbatlas.Organizations)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (_m *UserRepositoryI) FetchAll(tx database.TransactionI, params apidatabase.UsersSelectParams) ([]*models.UserPublicInfo, error) {\n\tret := _m.Called(tx, params)\n\n\tvar r0 []*models.UserPublicInfo\n\tif rf, ok := ret.Get(0).(func(database.TransactionI, apidatabase.UsersSelectParams) []*models.UserPublicInfo); ok {\n\t\tr0 = rf(tx, params)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*models.UserPublicInfo)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func(database.TransactionI, apidatabase.UsersSelectParams) error); ok {\n\t\tr1 = rf(tx, params)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pingLoop periodically sends a ping to all remote clusters.
func (rcs *Service) pingLoop(done <-chan struct{}) { pingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2) // create a thread pool to send pings concurrently to remotes. for i := 0; i < MaxConcurrentSends; i++ { go rcs.pingEmitter(pingChan, done) } go rcs.pingGenerator(pingChan, done) }
[ "func pingLoop(results chan Host, hostRegistry *HostRegistry, interval time.Duration, timeout time.Duration) {\n\tfor {\n\t\thostAddresses := hostRegistry.GetHostAddresses()\n\n\t\tlog.Info(\"Pinging these addresses: %q\\n\", hostAddresses)\n\n\t\tfor _, address := range hostAddresses {\n\t\t\tlog.Debug(\"Pinging: %v\\n\", address)\n\n\t\t\thost, err := hostRegistry.GetHost(address)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"GetHost() returned error=%v for address=%v\", err, address)\n\t\t\t}\n\n\t\t\tgo pingAddress(results, host, timeout)\n\t\t}\n\n\t\tlog.Debug(\"Started pings for all hosts. Sleeping for: %v\", interval)\n\t\ttime.Sleep(interval)\n\t}\n}", "func pingLoop(client *Client) {\n\t// Create ticker to send pings every two minutes.\n\tticker := time.NewTicker(time.Minute * 2)\n\tfor {\n\t\tselect {\n\t\t// If the client is done, stop the time and goroutine.\n\t\tcase <-client.Done:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\t// Loop pings to keep connection alive.\n\t\tcase <-ticker.C:\n\t\t\tSendPing(client, strconv.FormatInt(time.Now().UnixNano(), 10))\n\t\t}\n\t}\n}", "func (PingCIMunger) EachLoop(_ *github_util.Config) error { return nil }", "func (conn *Conn) ping() {\n\ttick := time.NewTicker(conn.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Raw(fmt.Sprintf(\"PING :%d\", time.Now().UnixNano()))\n\t\tcase <-conn.cPing:\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (conn *Conn) ping(ctx context.Context) {\n\tdefer conn.wg.Done()\n\ttick := time.NewTicker(conn.cfg.PingFreq)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tconn.Ping(fmt.Sprintf(\"%d\", time.Now().UnixNano()))\n\t\tcase <-ctx.Done():\n\t\t\t// control channel closed, bail out\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func periodicPing() {\n\tfor {\n\t\t// Shuffle membership list and get a member\n\t\t// Only executed when the membership list is not empty\n\t\tif CurrentList.Size() > 0 {\n\t\t\tmember := CurrentList.Shuffle()\n\t\t\t// Do not pick itself as the ping target\n\t\t\tif (member.TimeStamp == CurrentMember.TimeStamp) && (member.IP == CurrentMember.IP) {\n\t\t\t\ttime.Sleep(PingSendingPeriod)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tLogger.Info(\"Member (%d, %d) is selected by shuffling\\n\", member.TimeStamp, member.IP)\n\t\t\t// Get update entry from TTL Cache\n\t\t\tupdate, flag, err := getUpdate()\n\t\t\t// if no update there, do pure ping\n\t\t\tif err != nil {\n\t\t\t\tping(member)\n\t\t\t} else {\n\t\t\t\t// Send update as payload of ping\n\t\t\t\tpingWithPayload(member, update, flag)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(PingSendingPeriod)\n\t}\n}", "func (connection *Connection) ping() {\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t\tif len(connection.consumers) > 0 {\n\t\t\t//do some ping, if no response then kill it\n\t\t\tfor _, consumer := range connection.consumers {\n\t\t\t\t_, pingError := consumer.connection.Write([]byte(\"hunga\"))\n\t\t\t\tif pingError != nil {\n\t\t\t\t\t// fmt.Print(\"PING ERROR\")\n\t\t\t\t\tconnection.killConsumer(consumer.id)\n\t\t\t\t} else {\n\t\t\t\t\tconnection.getConsumerMessage(consumer.id)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *Impl) ping() {\n\tgo func(c *Impl) {\n\t\tt := time.NewTicker(3 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t.C:\n\t\t\t\terr := c.conn.SendSignal(frame.NewPingFrame())\n\t\t\t\tlogger.Info(\"Send Ping to zipper.\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err.Error() == quic.ErrConnectionClosed {\n\t\t\t\t\t\tlogger.Print(\"[client] ❌ the zipper was offline.\")\n\t\t\t\t\t} else 
{\n\t\t\t\t\t\t// other errors.\n\t\t\t\t\t\tlogger.Error(\"[client] ❌ sent Ping to zipper failed.\", \"err\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tt.Stop()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}(c)\n}", "func (m *Monitor) runMonitorLoop() error {\n\t// Get all nodes in cluster\n\tcfg, err := kubeutils.BuildConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient, err := clientset.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tklog.Info(\"started master\")\n\tvar deadNodes []*v1.Node\n\tfor {\n\t\t// Don't thrash here..\n\t\tklog.V(4).Info(\"little pause before work\")\n\t\ttime.Sleep(pausePollingSecs)\n\n\t\t// Get all the nodes - that have been reported as UnReachable...\n\t\t// reporting happens using configmaps in specified namespace\n\t\tdeadNodes, err = kubeutils.GetUnreachableNodes(client, m.namespace)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"error getting nodes reported as unreachable: %s\", err)\n\t\t\t// Try again\n\t\t\tcontinue\n\t\t}\n\t\tklog.V(3).Infof(\"got an unreachable node list (%d nodes)\", len(deadNodes))\n\n\t\t// reap any nodes as required...\n\t\tif m.reap && len(deadNodes) > 0 {\n\t\t\tklog.V(4).Info(\"We are set to reap\")\n\t\t\tfor _, node := range deadNodes {\n\t\t\t\tif err := reaper.Reap(node, client, m.dryRun); err != nil {\n\t\t\t\t\tklog.Errorf(\"error reaping %s, %s\", node.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func pingPeriodically(xm xmppC, timeout, interval time.Duration) error {\n\tt := time.NewTimer(interval)\n\tdefer t.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\tif xm.IsClosed() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err := xm.Ping(timeout); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Reset(interval)\n\t\t}\n\t}\n}", "func (c *Client) cyclePings() {\n\tc.instruments.Log(octo.LOGINFO, c.info.UUID, \"websocket.Client.cyclePings\", \"Started\")\n\tdefer c.wg.Done()\n\n\tticker := time.NewTicker(consts.MaxPingInterval)\n\n\t{\n\tcycleloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-c.closer:\n\t\t\t\tticker.Stop()\n\t\t\t\tbreak cycleloop\n\t\t\tcase _, ok := <-ticker.C:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak cycleloop\n\t\t\t\t}\n\n\t\t\t\tc.Conn.WriteMessage(websocket.PingMessage, nil)\n\t\t\t}\n\t\t}\n\n\t}\n\n\tc.instruments.Log(octo.LOGINFO, c.info.UUID, \"websocket.Client.cyclePings\", \"Completed\")\n}", "func (rcs *Service) pingEmitter(pingChan <-chan *model.RemoteCluster, done <-chan struct{}) {\n\tfor {\n\t\tselect {\n\t\tcase rc := <-pingChan:\n\t\t\tif rc == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tonline := rc.IsOnline()\n\n\t\t\tif err := rcs.pingRemote(rc); err != nil {\n\t\t\t\trcs.server.Log().Log(mlog.LvlRemoteClusterServiceWarn, \"Remote cluster ping failed\",\n\t\t\t\t\tmlog.String(\"remote\", rc.DisplayName),\n\t\t\t\t\tmlog.String(\"remoteId\", rc.RemoteId),\n\t\t\t\t\tmlog.Err(err),\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif online != rc.IsOnline() {\n\t\t\t\tif metrics := rcs.server.GetMetrics(); metrics != nil {\n\t\t\t\t\tmetrics.IncrementRemoteClusterConnStateChangeCounter(rc.RemoteId, rc.IsOnline())\n\t\t\t\t}\n\t\t\t\trcs.fireConnectionStateChgEvent(rc)\n\t\t\t}\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func Ping(node *shared.Node) {\n\tfor {\n\t\tblockchain.SwimBatchPuzzleGenerator(node)\n\n\t\ttime.Sleep(pingInterval)\n\t\ttarget := node.MembersSet.GetRandom()\n\t\tif target == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ttargetPeer := strings.Split(target, \" \")\n\t\tip := targetPeer[0]\n\t\tport := targetPeer[1]\n\t\tconn, err := net.Dial(\"tcp\", ip+\":\"+port)\n\t\tif err != nil 
{\n\t\t\t// failure detected!\n\t\t\tif strings.HasSuffix(err.Error(), \"connect: connection refused\") {\n\t\t\t\tnode.MembersSet.SetDelete(target)\n\t\t\t\tnode.FailMsgBuffer.Add(target)\n\t\t\t\tfmt.Println(\"FAILURE DETECTED \" + target)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Dial Error: \", err)\n\t\t\t}\n\t\t} else {\n\t\t\t// SWIM Implementation would send membership update message here\n\t\t\tswimMsg := \"DEAD \" + strings.Join(node.FailMsgBuffer.GetN(10), \",\") + \"\\n\"\n\t\t\tlogBandwithInfo(\"Send\", len(swimMsg))\n\t\t\tfmt.Fprintf(conn, swimMsg)\n\t\t\tfmt.Print(\"SWIM SENT \" + swimMsg)\n\t\t\ttransactionsMsg := strings.Join(node.TransactionBuffer.GetN(10000), \"\\n\") + \"\\n\"\n\t\t\tlogBandwithInfo(\"Send\", len(transactionsMsg))\n\t\t\tfmt.Fprintf(conn, transactionsMsg)\n\t\t\tfor _, block := range node.BlockBuffer.GetAll() {\n\t\t\t\tblockchain.SendBlock(node, conn, block)\n\t\t\t}\n\n\t\t\tconn.Close()\n\t\t}\n\t}\n}", "func (rcs *Service) pingRemote(rc *model.RemoteCluster) error {\n\tframe, err := makePingFrame(rc)\n\tif err != nil {\n\t\treturn err\n\t}\n\turl := fmt.Sprintf(\"%s/%s\", rc.SiteURL, PingURL)\n\n\tresp, err := rcs.sendFrameToRemote(PingTimeout, rc, frame, url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tping := model.RemoteClusterPing{}\n\terr = json.Unmarshal(resp, &ping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := rcs.server.GetStore().RemoteCluster().SetLastPingAt(rc.RemoteId); err != nil {\n\t\trcs.server.Log().Log(mlog.LvlRemoteClusterServiceError, \"Failed to update LastPingAt for remote cluster\",\n\t\t\tmlog.String(\"remote\", rc.DisplayName),\n\t\t\tmlog.String(\"remoteId\", rc.RemoteId),\n\t\t\tmlog.Err(err),\n\t\t)\n\t}\n\trc.LastPingAt = model.GetMillis()\n\n\tif metrics := rcs.server.GetMetrics(); metrics != nil {\n\t\tsentAt := time.Unix(0, ping.SentAt*int64(time.Millisecond))\n\t\telapsed := time.Since(sentAt).Seconds()\n\t\tmetrics.ObserveRemoteClusterPingDuration(rc.RemoteId, elapsed)\n\n\t\t// we approximate clock skew between remotes.\n\t\tskew := elapsed/2 - float64(ping.RecvAt-ping.SentAt)/1000\n\t\tmetrics.ObserveRemoteClusterClockSkew(rc.RemoteId, skew)\n\t}\n\n\trcs.server.Log().Log(mlog.LvlRemoteClusterServiceDebug, \"Remote cluster ping\",\n\t\tmlog.String(\"remote\", rc.DisplayName),\n\t\tmlog.String(\"remoteId\", rc.RemoteId),\n\t\tmlog.Int64(\"SentAt\", ping.SentAt),\n\t\tmlog.Int64(\"RecvAt\", ping.RecvAt),\n\t\tmlog.Int64(\"Diff\", ping.RecvAt-ping.SentAt),\n\t)\n\treturn nil\n}", "func (a *Agent) pingAllCandidates() {\n\tfor networkType, localCandidates := range a.localCandidates {\n\t\tif remoteCandidates, ok := a.remoteCandidates[networkType]; ok {\n\n\t\t\tfor _, localCandidate := range localCandidates {\n\t\t\t\tfor _, remoteCandidate := range remoteCandidates {\n\t\t\t\t\ta.pingCandidate(localCandidate, remoteCandidate)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (s *Server) serverHealthLoop() {\n\t// Monitor server health until shutdown\n\tticker := time.NewTicker(s.config.ServerHealthInterval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-s.shutdownCh:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tif err := s.updateClusterHealth(); err != nil {\n\t\t\t\ts.logger.Printf(\"[ERR] autopilot: error updating cluster health: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}", "func (d *Influxstatsd) SendLoop(ctx context.Context, c <-chan time.Time, network, address string) {\n\td.WriteLoop(ctx, c, conn.NewDefaultManager(network, address, d.logger))\n}", "func (s *Service) loop() {\n\tspan, ctx := 
trace.StartSpanFromContextWithTraceID(context.Background(), \"\", \"service-loop\")\n\n\tif s.ClusterReportIntervalS == 0 {\n\t\ts.ClusterReportIntervalS = defaultClusterReportIntervalS\n\t}\n\tif s.HeartbeatNotifyIntervalS == 0 {\n\t\ts.HeartbeatNotifyIntervalS = defaultHeartbeatNotifyIntervalS\n\t}\n\tif s.MaxHeartbeatNotifyNum <= 0 {\n\t\ts.MaxHeartbeatNotifyNum = defaultMaxHeartbeatNotifyNum\n\t}\n\tif s.MetricReportIntervalM <= 0 {\n\t\ts.MetricReportIntervalM = defaultMetricReportIntervalM\n\t}\n\tif s.ConsistentCheckIntervalM <= 0 {\n\t\ts.ConsistentCheckIntervalM = defaultCheckConsistentIntervalM\n\t}\n\n\treportTicker := time.NewTicker(time.Duration(s.ClusterReportIntervalS) * time.Second)\n\tdefer reportTicker.Stop()\n\theartbeatNotifyTicker := time.NewTicker(time.Duration(s.HeartbeatNotifyIntervalS) * time.Second)\n\tdefer heartbeatNotifyTicker.Stop()\n\n\tmetricReportTicker := time.NewTicker(time.Duration(s.MetricReportIntervalM) * time.Minute)\n\tdefer metricReportTicker.Stop()\n\n\tcheckTicker := time.NewTicker(time.Duration(s.ConsistentCheckIntervalM) * time.Minute)\n\tdefer checkTicker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-reportTicker.C:\n\t\t\tif s.ConsulAgentAddr == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclusterInfo := clustermgr.ClusterInfo{\n\t\t\t\tRegion: s.Region,\n\t\t\t\tClusterID: s.ClusterID,\n\t\t\t\tReadonly: s.Readonly,\n\t\t\t\tNodes: make([]string, 0),\n\t\t\t}\n\t\t\tspaceStatInfo := s.DiskMgr.Stat(ctx)\n\t\t\tclusterInfo.Capacity = spaceStatInfo.TotalSpace\n\t\t\tclusterInfo.Available = spaceStatInfo.WritableSpace\n\t\t\t// filter learner node\n\t\t\tpeers := s.raftNode.Status().Peers\n\t\t\tpeersM := make(map[uint64]raftserver.Peer)\n\t\t\tfor i := range peers {\n\t\t\t\tpeersM[peers[i].Id] = peers[i]\n\t\t\t}\n\t\t\tfor id, node := range s.raftNode.GetNodes() {\n\t\t\t\tif peersM[id].IsLearner {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tclusterInfo.Nodes = append(clusterInfo.Nodes, s.RaftConfig.RaftNodeConfig.NodeProtocol+node)\n\t\t\t}\n\n\t\t\tval, err := json.Marshal(clusterInfo)\n\t\t\tif err != nil {\n\t\t\t\tspan.Error(\"json marshal clusterInfo failed, err: \", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tclusterKey := clustermgr.GetConsulClusterPath(s.Region) + s.ClusterID.ToString()\n\t\t\t_, err = s.consulClient.KV().Put(&api.KVPair{Key: clusterKey, Value: val}, nil)\n\t\t\tif err != nil {\n\t\t\t\tspan.Error(\"update clusterInfo into consul failed, err: \", err)\n\t\t\t}\n\t\tcase <-heartbeatNotifyTicker.C:\n\t\t\tif !s.raftNode.IsLeader() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchanges := s.DiskMgr.GetHeartbeatChangeDisks()\n\t\t\t// report heartbeat change metric\n\t\t\ts.reportHeartbeatChange(float64(len(changes)))\n\t\t\t// in some case, like cm's network problem, it may trigger a mounts of disk heartbeat change\n\t\t\t// in this situation, we need to ignore it and do some alert\n\t\t\tif len(changes) > s.MaxHeartbeatNotifyNum {\n\t\t\t\tspan.Error(\"a lots of disk heartbeat change happen: \", changes)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := range changes {\n\t\t\t\tspan.Debugf(\"notify disk heartbeat change, change info: %v\", changes[i])\n\t\t\t\terr := s.VolumeMgr.DiskWritableChange(ctx, changes[i].DiskID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tspan.Error(\"notify disk heartbeat change failed, err: \", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-metricReportTicker.C:\n\t\t\ts.metricReport(ctx)\n\t\tcase <-checkTicker.C:\n\t\t\tif !s.raftNode.IsLeader() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tclis := make([]*clustermgr.Client, 
0)\n\t\t\t\tpeers := s.raftNode.Status().Peers\n\t\t\t\tpeersM := make(map[uint64]raftserver.Peer)\n\t\t\t\tfor i := range peers {\n\t\t\t\t\tpeersM[peers[i].Id] = peers[i]\n\t\t\t\t}\n\t\t\t\tfor id, node := range s.raftNode.GetNodes() {\n\t\t\t\t\tif peersM[id].IsLearner {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\thost := s.RaftConfig.RaftNodeConfig.NodeProtocol + node\n\t\t\t\t\tcli := clustermgr.New(&clustermgr.Config{LbConfig: rpc.LbConfig{Hosts: []string{host}}})\n\t\t\t\t\tclis = append(clis, cli)\n\t\t\t\t}\n\t\t\t\tif len(clis) <= 1 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tiVids, err := s.checkVolInfos(ctx, clis)\n\t\t\t\tif err != nil {\n\t\t\t\t\tspan.Errorf(\"get checkVolInfos failed:%v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif len(iVids) != 0 {\n\t\t\t\t\t// readIndex request may be aggregated,which could temporarily lead to each nodes volume info not equal\n\t\t\t\t\t// so use get volume do double check\n\t\t\t\t\tactualIVids, err := s.doubleCheckVolInfos(ctx, clis, iVids)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tspan.Errorf(\"double check vids:%v volume info failed:%v\", iVids, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif len(actualIVids) != 0 {\n\t\t\t\t\t\ts.reportInConsistentVols(actualIVids)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\tcase <-s.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *Runservice) etcdPingerLoop(ctx context.Context) {\n\tfor {\n\t\tif err := s.etcdPinger(ctx); err != nil {\n\t\t\tlog.Errorf(\"err: %+v\", err)\n\t\t}\n\n\t\tsleepCh := time.NewTimer(1 * time.Second).C\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-sleepCh:\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pingEmitter pulls Remotes from the ping queue (pingChan) and pings them. Pinging a remote cannot take longer than PingTimeoutMillis.
func (rcs *Service) pingEmitter(pingChan <-chan *model.RemoteCluster, done <-chan struct{}) { for { select { case rc := <-pingChan: if rc == nil { return } online := rc.IsOnline() if err := rcs.pingRemote(rc); err != nil { rcs.server.Log().Log(mlog.LvlRemoteClusterServiceWarn, "Remote cluster ping failed", mlog.String("remote", rc.DisplayName), mlog.String("remoteId", rc.RemoteId), mlog.Err(err), ) } if online != rc.IsOnline() { if metrics := rcs.server.GetMetrics(); metrics != nil { metrics.IncrementRemoteClusterConnStateChangeCounter(rc.RemoteId, rc.IsOnline()) } rcs.fireConnectionStateChgEvent(rc) } case <-done: return } } }
[ "func (rcs *Service) pingLoop(done <-chan struct{}) {\n\tpingChan := make(chan *model.RemoteCluster, MaxConcurrentSends*2)\n\n\t// create a thread pool to send pings concurrently to remotes.\n\tfor i := 0; i < MaxConcurrentSends; i++ {\n\t\tgo rcs.pingEmitter(pingChan, done)\n\t}\n\n\tgo rcs.pingGenerator(pingChan, done)\n}", "func pingLoop(results chan Host, hostRegistry *HostRegistry, interval time.Duration, timeout time.Duration) {\n\tfor {\n\t\thostAddresses := hostRegistry.GetHostAddresses()\n\n\t\tlog.Info(\"Pinging these addresses: %q\\n\", hostAddresses)\n\n\t\tfor _, address := range hostAddresses {\n\t\t\tlog.Debug(\"Pinging: %v\\n\", address)\n\n\t\t\thost, err := hostRegistry.GetHost(address)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"GetHost() returned error=%v for address=%v\", err, address)\n\t\t\t}\n\n\t\t\tgo pingAddress(results, host, timeout)\n\t\t}\n\n\t\tlog.Debug(\"Started pings for all hosts. Sleeping for: %v\", interval)\n\t\ttime.Sleep(interval)\n\t}\n}", "func (pinger *PerpetualPinger) pingAsync(self phi.Task) {\n\tresponder := make(chan phi.Message, 1)\n\tok := pinger.ponger.Send(Ping{Responder: responder})\n\tif !ok {\n\t\tpanic(\"failed to send ping\")\n\t}\n\tgo func() {\n\t\tfor m := range responder {\n\t\t\tok := self.Send(m)\n\t\t\tif !ok {\n\t\t\t\tpanic(\"failed to receive pong\")\n\t\t\t}\n\t\t}\n\t}()\n}", "func pingAddress(results chan Host, oldHost Host, timeout time.Duration) {\n\tisUp, rtt, err := pingWithFastping(oldHost.Address, timeout)\n\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n\tnewHost := Host{}\n\n\tnewHost.Address = oldHost.Address\n\tnewHost.Description = oldHost.Description\n\n\tif isUp {\n\t\tnewHost.Status = Online\n\t\tnewHost.Latency = rtt\n\t} else {\n\t\tnewHost.Status = Offline\n\t}\n\tlog.Info(\"Pinged: address=%q status=%s rtt=%s\\n\", newHost.Address, newHost.Status, newHost.Latency)\n\n\tresults <- newHost\n}", "func pingPeriodically(xm xmppC, timeout, interval time.Duration) error {\n\tt := time.NewTimer(interval)\n\tdefer t.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\tif xm.IsClosed() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err := xm.Ping(timeout); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Reset(interval)\n\t\t}\n\t}\n}", "func (p *pinger) Ping(addr net.Addr) {\n\tdefer close(p.reportChan)\n\tdefer close(p.errChan)\n\n\tconn, err := icmp.ListenPacket(\"ip4:icmp\", \"\")\n\tif err != nil {\n\t\tp.errChan <- fmt.Errorf(\"cannot connect to addr %s: %v\", addr, err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tseq := 0\n\tfor {\n\t\tselect {\n\t\tcase <-p.stop:\n\t\t\treturn\n\t\tdefault:\n\t\t\tping, err := p.ping(conn, addr, seq)\n\t\t\tif err != nil {\n\t\t\t\tp.errChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp.reportChan <- ping\n\t\t\tseq++\n\n\t\t\tif p.opts.Count != 0 && int(p.opts.Count) == seq {\n\t\t\t\tp.Stop()\n\t\t\t} else {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *manager) onPing(addr string, rtt time.Duration) error {\n\tv := int32(rtt.Nanoseconds() / 1000)\n\tif v == 0 { // Don't let it be zero, otherwise the update would fail\n\t\tv = 1\n\t}\n\n\tm.monitor.Measure(addr, v)\n\treturn nil\n}", "func (sbd *State) PingChecker(updateChannel chan ServiceUpdate, shutdownPingSignal chan interface{}) {\n\tif sbd.Config.PingHosts { // The ping option was set\n\t\tilog.Println(\"Started the Ping Check Provider\")\n\n\t\ttotalWaitDuration := sbd.Config.TimeBetweenPingChecks / 1 * time.Second\n\t\tcurrentWaitDuration := totalWaitDuration\n\n\t\tfor 
{\n\t\t\tselect {\n\t\t\tcase <-shutdownPingSignal:\n\t\t\t\tilog.Println(\"Shutting down the Ping Check Provider\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t// Sleep before testing these hosts again\n\t\t\t\tif currentWaitDuration < totalWaitDuration {\n\t\t\t\t\tcurrentWaitDuration += 1 * time.Second\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tsbd.serviceLock.RLock()\n\t\t\t\tfor i := range sbd.Hosts {\n\t\t\t\t\thost := sbd.Hosts[i]\n\t\t\t\t\t// Asyncronously ping hosts so we don't wait full timeouts and can ping faster.\n\t\t\t\t\tgo host.PingHost(updateChannel, sbd.Config.PingTimeout)\n\t\t\t\t}\n\n\t\t\t\tsbd.serviceLock.RUnlock()\n\n\t\t\t\tcurrentWaitDuration -= totalWaitDuration\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *Peer) pingHandler() {\n\tpingTicker := time.NewTicker(pingInterval)\n\tdefer pingTicker.Stop()\n\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\tnonce, err := wire.RandomUint64()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Not sending ping to %s: %v\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.QueueMessage(wire.NewMsgPing(nonce), nil)\n\n\t\tcase <-p.quit:\n\t\t\tbreak out\n\t\t}\n\t}\n}", "func (wp *WtPeer) pingHandler() {\n\tdefer wp.wg.Done()\n\n\tpingTicker := time.NewTicker(pingInterval)\n\tdefer pingTicker.Stop()\n\n\t// TODO(ys): make dynamic in order to create fake cover traffic\n\tconst numPingBytes = 16\n\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\tnow := time.Now().UnixNano()\n\t\t\tatomic.StoreInt64(&wp.pingLastSend, now)\n\t\t\twp.writeMessage(NewPingMessage(numPingBytes))\n\t\tcase <-wp.quit:\n\t\t\tbreak out\n\t\t}\n\t}\n}", "func (p Pinger) Pong(timeout int) (msgList []*icmp.Message, err error) {\r\n\tcErr := make(chan error, 1)\r\n\tcMsg := make(chan *icmp.Message, 1)\r\n\tfor i := 0; i < p.amt; i++ {\r\n\t\tgo func() {\r\n\t\t\tmsg, err := p.RecvOnePong()\r\n\t\t\tif err != nil {\r\n\t\t\t\tcErr <- err\r\n\t\t\t\treturn\r\n\t\t\t}\r\n\t\t\tcMsg <- msg\r\n\t\t}()\r\n\t}\r\n\tfor i := 0; i < p.amt; i++ {\r\n\t\tselect {\r\n\t\tcase res := <-cErr:\r\n\t\t\terr = res\r\n\t\tcase res := <-cMsg:\r\n\t\t\tmsgList = append(msgList, res)\r\n\t\tcase <-time.After(time.Duration(timeout) * time.Millisecond):\r\n\t\t\terr = errors.New(\"timeout\")\r\n\t\t}\r\n\t}\r\n\treturn\r\n}", "func Ping(address string, interval, timeout time.Duration) (*PingResponse, error) {\n\tif timeout < 0 {\n\t\treturn nil, errors.New(\"gumble: timeout must be positive\")\n\t}\n\tdeadline := time.Now().Add(timeout)\n\tconn, err := net.DialTimeout(\"udp\", address, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\tconn.SetReadDeadline(deadline)\n\n\tvar (\n\t\tidsLock sync.Mutex\n\t\tids = make(map[string]time.Time)\n\t)\n\n\tbuildSendPacket := func() {\n\t\tvar packet [12]byte\n\t\tif _, err := rand.Read(packet[4:]); err != nil {\n\t\t\treturn\n\t\t}\n\t\tid := string(packet[4:])\n\t\tidsLock.Lock()\n\t\tids[id] = time.Now()\n\t\tidsLock.Unlock()\n\t\tconn.Write(packet[:])\n\t}\n\n\tif interval > 0 {\n\t\tend := make(chan struct{})\n\t\tdefer close(end)\n\t\tgo func() {\n\t\t\tticker := time.NewTicker(interval)\n\t\t\tdefer ticker.Stop()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tbuildSendPacket()\n\t\t\t\tcase <-end:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tbuildSendPacket()\n\n\tfor {\n\t\tvar incoming [24]byte\n\t\tif _, err := io.ReadFull(conn, incoming[:]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tid := 
string(incoming[4:12])\n\t\tidsLock.Lock()\n\t\tsendTime, ok := ids[id]\n\t\tidsLock.Unlock()\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn &PingResponse{\n\t\t\tAddress: conn.RemoteAddr().(*net.UDPAddr),\n\t\t\tPing: time.Since(sendTime),\n\t\t\tVersion: Version{\n\t\t\t\tVersion: binary.BigEndian.Uint32(incoming[0:]),\n\t\t\t},\n\t\t\tConnectedUsers: int(binary.BigEndian.Uint32(incoming[12:])),\n\t\t\tMaximumUsers: int(binary.BigEndian.Uint32(incoming[16:])),\n\t\t\tMaximumBitrate: int(binary.BigEndian.Uint32(incoming[20:])),\n\t\t}, nil\n\t}\n}", "func (connection *Connection) ping() {\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t\tif len(connection.consumers) > 0 {\n\t\t\t//do some ping, if no response then kill it\n\t\t\tfor _, consumer := range connection.consumers {\n\t\t\t\t_, pingError := consumer.connection.Write([]byte(\"hunga\"))\n\t\t\t\tif pingError != nil {\n\t\t\t\t\t// fmt.Print(\"PING ERROR\")\n\t\t\t\t\tconnection.killConsumer(consumer.id)\n\t\t\t\t} else {\n\t\t\t\t\tconnection.getConsumerMessage(consumer.id)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *protocol) Ping(ctx context.Context, peer p2pcrypto.PublicKey) error {\n\tplogger := p.logger.WithFields(log.String(\"type\", \"ping\"), log.String(\"to\", peer.String()))\n\tplogger.Debug(\"send ping request\")\n\n\tdata, err := types.InterfaceToBytes(p.local)\n\tif err != nil {\n\t\treturn err\n\t}\n\tch := make(chan []byte, 1)\n\tfoo := func(msg []byte) {\n\t\tplogger.Debug(\"handle ping response\")\n\t\tsender := &node.Info{}\n\t\terr := types.BytesToInterface(msg, sender)\n\n\t\tif err != nil {\n\t\t\tplogger.With().Warning(\"got unreadable pong\", log.Err(err))\n\t\t\treturn\n\t\t}\n\t\t// TODO: if we pinged it we already have id so no need to update,\n\t\t// but what if id or listen address has changed?\n\t\tch <- sender.ID.Bytes()\n\t}\n\n\terr = p.msgServer.SendRequest(ctx, server.PingPong, data, peer, foo, func(err error) {})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttimeout := time.NewTimer(MessageTimeout) // todo: check whether this is useless because of `requestLifetime`\n\tselect {\n\tcase id := <-ch:\n\t\tif id == nil {\n\t\t\treturn errors.New(\"failed sending message\")\n\t\t}\n\t\tif !bytes.Equal(id, peer.Bytes()) {\n\t\t\treturn errors.New(\"got pong with different public key\")\n\t\t}\n\tcase <-timeout.C:\n\t\treturn errors.New(\"ping timeout\")\n\t}\n\n\treturn nil\n}", "func PingTimeout(addr string, timeout int) (PingResponse, error) {\n\tctx := context.Background()\n\tctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Millisecond)\n\tdefer cancel()\n\treturn PingContext(ctx, addr)\n}", "func Ping(ctx context.Context, h host.Host, p peer.ID) <-chan Result {\n\ts, err := h.NewStream(ctx, p, ID)\n\tif err != nil {\n\t\tch := make(chan Result, 1)\n\t\tch <- Result{Error: err}\n\t\tclose(ch)\n\t\treturn ch\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\n\tout := make(chan Result)\n\tgo func() {\n\t\tdefer close(out)\n\t\tdefer cancel()\n\n\t\tfor ctx.Err() == nil {\n\t\t\tvar res Result\n\t\t\tres.RTT, res.Error = ping(s)\n\n\t\t\t// canceled, ignore everything.\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// No error, record the RTT.\n\t\t\tif res.Error == nil {\n\t\t\t\th.Peerstore().RecordLatency(p, res.RTT)\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase out <- res:\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tgo func() {\n\t\t// forces the ping to abort.\n\t\t<-ctx.Done()\n\t\ts.Reset()\n\t}()\n\n\treturn out\n}", "func 
PingScan(outFileName string, workerCount int, targetFileName string, timeoutSeconds int) (error) {\n\n\ttimeout = time.Duration(timeoutSeconds) * time.Second\n\t// GET SUBNET LIST\n\tvar targets[] string\n\tvar err error\n\n\tif targetFileName == \"\" {\n\t\t// GET TARGET LIST\n\t\tDebug(\"Using RFC1918 subnets as targets\")\n\t\ttargets, err = makeHostList(RFC1918Subnets)\n\t}else {\n\t\t// PARSE FROM FILE\n\t\tDebug(\"Reading targets from file \" + targetFileName)\n\t\ttargets, err = getTargetsFromFile(targetFileName)\n\t}\n\t\n\tif err != nil{\n\t\treturn err\n\t}\n\tDebug(fmt.Sprintf(\"Identified %d targets :)\", len(targets)))\n\tnumPings = len(targets)\n\tnumPingsFinished = 0\n\t// ALLOCATE TASKS TO WORKERS\n\tDebug(\"Allocating tasks\")\n\tgo allocate(targets)\n\n\t// HANDLE RESULTS OF WORKER THREADS\n\tdone := make(chan bool)\n\tgo handlePongs(done, outFileName)\n\n\t// START WORKERS\n\tcreateWorkerPool(workerCount)\n\t<- done\n\n\t\n\treturn nil\n}", "func pong(pings <-chan string, pongs chan<- string) {\n\tmsg := <- pings\n\tpongs <- msg\n}", "func PingHosts(ipBase string, ipRange []int) {\n\n\tvar wg sync.WaitGroup\n\tcmd := LibConfig.SysCommands[\"PING\"] + \" -q -W 1 -c 1 \" + ipBase\n\n\tfor i := ipRange[0]; i < ipRange[1]; i++ {\n\t\twg.Add(1)\n\n\t\t// allow threaded system command calls to finish asynchronously\n\t\tgo func(i int, w *sync.WaitGroup) {\n\t\t\tdefer w.Done()\n\t\t\tRunCommand(cmd + strconv.Itoa(i))\n\t\t}(i, &wg)\n\t}\n\n\twg.Wait()\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pingRemote makes a synchronous ping to a remote cluster. Returns an error if the ping is unsuccessful and nil on success.
func (rcs *Service) pingRemote(rc *model.RemoteCluster) error { frame, err := makePingFrame(rc) if err != nil { return err } url := fmt.Sprintf("%s/%s", rc.SiteURL, PingURL) resp, err := rcs.sendFrameToRemote(PingTimeout, rc, frame, url) if err != nil { return err } ping := model.RemoteClusterPing{} err = json.Unmarshal(resp, &ping) if err != nil { return err } if err := rcs.server.GetStore().RemoteCluster().SetLastPingAt(rc.RemoteId); err != nil { rcs.server.Log().Log(mlog.LvlRemoteClusterServiceError, "Failed to update LastPingAt for remote cluster", mlog.String("remote", rc.DisplayName), mlog.String("remoteId", rc.RemoteId), mlog.Err(err), ) } rc.LastPingAt = model.GetMillis() if metrics := rcs.server.GetMetrics(); metrics != nil { sentAt := time.Unix(0, ping.SentAt*int64(time.Millisecond)) elapsed := time.Since(sentAt).Seconds() metrics.ObserveRemoteClusterPingDuration(rc.RemoteId, elapsed) // we approximate clock skew between remotes. skew := elapsed/2 - float64(ping.RecvAt-ping.SentAt)/1000 metrics.ObserveRemoteClusterClockSkew(rc.RemoteId, skew) } rcs.server.Log().Log(mlog.LvlRemoteClusterServiceDebug, "Remote cluster ping", mlog.String("remote", rc.DisplayName), mlog.String("remoteId", rc.RemoteId), mlog.Int64("SentAt", ping.SentAt), mlog.Int64("RecvAt", ping.RecvAt), mlog.Int64("Diff", ping.RecvAt-ping.SentAt), ) return nil }
[ "func (rn *RemoteNode) Ping() error {\n\tmsg, err := rn.LocalNode.NewPingMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rn.SendMessageSync(msg, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (rn *RemoteNode) Ping() error {\n\tmsg, err := NewPingMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rn.SendMessageSync(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (cc *Cluster) AsyncPing(args ...interface{}) redis.Future {\n\treturn cc.AsyncCall(cmdPing, args...)\n}", "func ping(hosts []string, returnUnavailable bool) []string {\n\tvar toReturn []string\n\tvar cmds []*exec.Cmd\n\n\t// Start pinging:\n\tfor _, host := range hosts {\n\t\tlog.Println(\"Pinging\", host)\n\t\t// cmd := exec.Command(\"ssh\", \"-o ConnectTimeout=1\", host, \"echo\")\n\t\tcmd := exec.Command(\"nc\", \"-z\", \"-w 1\", host, \"22\")\n\t\tcmd.Start()\n\t\tcmds = append(cmds, cmd)\n\t}\n\n\t// Read result of the pings:\n\tfor i, cmd := range cmds {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tlog.Println(\"Unavailable host:\", hosts[i], \"ping error:\", err)\n\t\t\tif returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t} else {\n\t\t\tif !returnUnavailable {\n\t\t\t\ttoReturn = append(toReturn, hosts[i])\n\t\t\t}\n\t\t}\n\t}\n\n\treturn toReturn\n}", "func (my *MySQL) Ping() (err os.Error) {\n defer my.unlock()\n defer catchOsError(&err)\n my.lock()\n\n if my.conn == nil {\n return NOT_CONN_ERROR\n }\n if my.unreaded_rows {\n return UNREADED_ROWS_ERROR\n }\n\n // Send command\n my.sendCmd(_COM_PING)\n // Get server response\n my.getResult(nil)\n\n return\n}", "func (c *Connector) Ping() (err error) {\n\turl, err := c.getURL(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Add(\"content-type\", \"application/json\")\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\n\tres, err := c.getHTTPClient().Do(req)\n\tif err != nil {\n\t\treturn err\n\t} else if res.StatusCode != http.StatusOK {\n\t\tdefer res.Body.Close()\n\t\tbody, _ := ioutil.ReadAll(res.Body)\n\t\terr = fmt.Errorf(\"%s\", string(body))\n\t}\n\treturn err\n}", "func (session *pureSession) ping() error {\n\tif session.connection == nil {\n\t\treturn fmt.Errorf(\"failed to ping: Session has been released\")\n\t}\n\t// send ping request\n\trs, err := session.execute(`RETURN \"NEBULA GO PING\"`)\n\t// check connection level error\n\tif err != nil {\n\t\treturn fmt.Errorf(\"session ping failed, %s\" + err.Error())\n\t}\n\t// check session level error\n\tif !rs.IsSucceed() {\n\t\treturn fmt.Errorf(\"session ping failed, %s\" + rs.GetErrorMsg())\n\t}\n\treturn nil\n}", "func (c *Client) Ping(checkAllMetaServers bool) error {\n\tc.mu.RLock()\n\tserver := c.metaServers[0]\n\tc.mu.RUnlock()\n\turl := c.url(server) + \"/ping\"\n\tif checkAllMetaServers {\n\t\turl = url + \"?all=true\"\n\t}\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn nil\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fmt.Errorf(string(b))\n}", "func (s *Syncthing) Ping(ctx context.Context, local bool) bool {\n\t_, err := s.APICall(ctx, \"rest/system/ping\", \"GET\", 200, nil, local, nil, false, 0)\n\tif err == nil {\n\t\treturn true\n\t}\n\tif strings.Contains(err.Error(), \"Client.Timeout\") {\n\t\treturn true\n\t}\n\toktetoLog.Infof(\"error pinging syncthing: %s\", err.Error())\n\treturn false\n}", "func 
pingLocal() {\n\tp, err := icmp.NewPing(args, cfg)\n\tif err != nil {\n\t\tprintln(err.Error())\n\t}\n\tif p == nil {\n\t\treturn\n\t}\n\tif !p.IsCIDR() {\n\t\tresp := p.Run()\n\t\tp.PrintPretty(resp)\n\t}\n}", "func (r *vtmClient) Ping() (bool, error) {\n\tif err := r.apiGet(vtmAPIPing, nil, nil); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}", "func (m *Manager) Ping() string {\n\tnodes := m.Nodes()\n\tcommittedNodesLen := len(nodes)\n\n\tif committedNodesLen > 0 {\n\t\tnode := nodes[0]\n\n\t\tres, err := http.Get(node.LocalIP)\n\n\t\tif err != nil {\n\t\t\tlog.Print(\"Target horde node is either unhealthy or down!\", err)\n\t\t}\n\n\t\tdefer res.Body.Close()\n\n\t\tif res.StatusCode == http.StatusOK {\n\t\t\t_, err := ioutil.ReadAll(res.Body)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(\"Failed to read body\", err)\n\n\t\t\t\treturn \"pang\"\n\t\t\t}\n\n\t\t\treturn \"pong\"\n\t\t}\n\t}\n\n\treturn \"pang\"\n}", "func ping(c redis.Conn) error {\n\t// Send PING command to Redis\n\t// PING command returns a Redis \"Simple String\"\n\t// Use redis.String to convert the interface type to string\n\ts, err := redis.String(c.Do(\"PING\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// fmt.Println(\"PING Response = \", s)\n\tfmt.Print(\"Redis connection \")\n\n\tif s == \"PONG\" {\n\t\tcolor.Green(\"Success\\n\")\n\t} else {\n\t\tcolor.Red(\"Error\\n\")\n\t}\n\n\t// Output: PONG\n\treturn nil\n}", "func (c *Conn) Ping() error {\n\tresponse := c.client.Cmd(cmdPing)\n\tif !isOK(response) {\n\t\treturn errx.Errorf(\"ping command failed\")\n\t}\n\treturn nil\n}", "func ping(c redis.Conn) error {\n\t// Send PING command to Redis\n\t// PING command returns a Redis \"Simple String\"\n\t// Use redis.String to convert the interface type to string\n\ts, err := redis.String(c.Do(\"PING\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"PING Response = %s\\n\", s)\n\t// Output: PONG\n\n\treturn nil\n}", "func (c *Client) DeepPing() error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\treturn c.DeepPingWithContext(ctx)\n}", "func ConnectIfVolumeIsRemote(s *state.State, poolName string, projectName string, volumeName string, volumeType int, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request) (incus.InstanceServer, error) {\n\tlocalNodeID := s.DB.Cluster.GetNodeID()\n\tvar err error\n\tvar nodes []db.NodeInfo\n\tvar poolID int64\n\terr = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {\n\t\tpoolID, err = tx.GetStoragePoolID(ctx, poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnodes, err = tx.GetStorageVolumeNodes(ctx, poolID, projectName, volumeName, volumeType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil && err != db.ErrNoClusterMember {\n\t\treturn nil, err\n\t}\n\n\t// If volume uses a remote storage driver and so has no explicit cluster member, then we need to check\n\t// whether it is exclusively attached to remote instance, and if so then we need to forward the request to\n\t// the node whereit is currently used. 
This avoids conflicting with another member when using it locally.\n\tif err == db.ErrNoClusterMember {\n\t\t// GetStoragePoolVolume returns a volume with an empty Location field for remote drivers.\n\t\tvar dbVolume *db.StorageVolume\n\t\terr = s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {\n\t\t\tdbVolume, err = tx.GetStoragePoolVolume(ctx, poolID, projectName, volumeType, volumeName, true)\n\t\t\treturn err\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tremoteInstance, err := storagePools.VolumeUsedByExclusiveRemoteInstancesWithProfiles(s, poolName, projectName, &dbVolume.StorageVolume)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed checking if volume %q is available: %w\", volumeName, err)\n\t\t}\n\n\t\tif remoteInstance != nil {\n\t\t\tvar instNode db.NodeInfo\n\t\t\terr := s.DB.Cluster.Transaction(context.TODO(), func(ctx context.Context, tx *db.ClusterTx) error {\n\t\t\t\tinstNode, err = tx.GetNodeByName(ctx, remoteInstance.Node)\n\t\t\t\treturn err\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed getting cluster member info for %q: %w\", remoteInstance.Node, err)\n\t\t\t}\n\n\t\t\t// Replace node list with instance's cluster member node (which might be local member).\n\t\t\tnodes = []db.NodeInfo{instNode}\n\t\t} else {\n\t\t\t// Volume isn't exclusively attached to an instance. Use local cluster member.\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tnodeCount := len(nodes)\n\tif nodeCount > 1 {\n\t\treturn nil, fmt.Errorf(\"More than one cluster member has a volume named %q. Please target a specific member\", volumeName)\n\t} else if nodeCount < 1 {\n\t\t// Should never get here.\n\t\treturn nil, fmt.Errorf(\"Volume %q has empty cluster member list\", volumeName)\n\t}\n\n\tnode := nodes[0]\n\tif node.ID == localNodeID {\n\t\t// Use local cluster member if volume belongs to this local member.\n\t\treturn nil, nil\n\t}\n\n\t// Connect to remote cluster member.\n\treturn Connect(node.Address, networkCert, serverCert, r, false)\n}", "func (ar *AuthRPC) Ping(req *rpc.ReqKeepAlive, resp *rpc.RespKeepAlive) error {\n\treturn nil\n}", "func (l *LTCWallet) PingRPC() error {\n\treturn ltc.PingRPC(l.Testnet, l.RPCInfo)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsUserInProtectBranchWhitelist returns true if the given user is in the whitelist of a branch in a repository.
func IsUserInProtectBranchWhitelist(repoID, userID int64, branch string) bool {
	has, err := x.Where("repo_id = ?", repoID).And("user_id = ?", userID).And("name = ?", branch).Get(new(ProtectBranchWhitelist))
	return has && err == nil
}
[ "func (c *cachingStore) IsBackendUserAllowed(ctx context.Context, backendUser, backendID string) (bool, error) {\n\treturn c.BackingStore.IsBackendUserAllowed(ctx, backendUser, backendID)\n}", "func (cfg *Config) inWhitelist(addr net.Addr) bool {\n\tif len(cfg.Whitelists) == 0 {\n\t\treturn false\n\t}\n\n\thost, _, err := net.SplitHostPort(addr.String())\n\tif err != nil {\n\t\tlog.Warnf(\"Unable to SplitHostPort on '%s': %v\", addr, err)\n\t\treturn false\n\t}\n\tip := net.ParseIP(host)\n\tif ip == nil {\n\t\tlog.Warnf(\"Unable to parse IP '%s'\", addr)\n\t\treturn false\n\t}\n\n\tfor _, ipnet := range cfg.Whitelists {\n\t\tif ipnet.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func RefIsBranch(dir string, ref string, gitter Gitter) (bool, error) {\n\tremoteBranches, err := gitter.RemoteBranches(dir)\n\tif err != nil {\n\t\treturn false, errors.Wrapf(err, \"error getting remote branches to find provided ref %s\", ref)\n\t}\n\tfor _, b := range remoteBranches {\n\t\tif strings.Contains(b, ref) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}", "func isInWhitelist(ip string) bool {\n\tfor _, wl := range whitelist {\n\t\tif strings.HasPrefix(ip, wl) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (_Consents *consentsCaller) IsAllowed(ctx context.Context, userId [8]byte, appName string, action uint8, dataType string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\n\terr := _Consents.contract.Call(&bind.CallOpts{Context: ctx}, out, \"isAllowed\", userId, appName, action, dataType)\n\treturn *ret0, err\n}", "func (_Token *TokenCaller) IsWhitelisted(opts *bind.CallOpts, account common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"isWhitelisted\", account)\n\treturn *ret0, err\n}", "func IsBranch(name string) bool {\n\treturn plumbing.ReferenceName(name).IsBranch()\n}", "func IsBranchInSync(branchName string) bool {\n\tif HasTrackingBranch(branchName) {\n\t\tlocalSha := GetBranchSha(branchName)\n\t\tremoteSha := GetBranchSha(GetTrackingBranchName(branchName))\n\t\treturn localSha == remoteSha\n\t}\n\treturn true\n}", "func checkBranchProtection(repo *github.Repository) {\n\n\tneedsUpdate := false\n\treviewContexts := []string{defaultStatusCheck}\n\n\tprotection, response, err := client.Repositories.GetBranchProtection(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branchToProtect)\n\tif err != nil && response.StatusCode != 404 {\n\t\t// we don't care about 404 responses, as this just tells us it's not protected\n\t\tprintln(\"Could not check branch protection\", err.Error())\n\t\treturn\n\t}\n\n\tif response.StatusCode == 404 {\n\t\tneedsUpdate = true\n\t} else {\n\t\treviews := protection.GetRequiredPullRequestReviews()\n\t\tif reviews == nil || reviews.RequiredApprovingReviewCount != 1 {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tstatusChecks := protection.GetRequiredStatusChecks()\n\t\tif statusChecks == nil || statusChecks.Strict != true {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tif statusChecks == nil || !contains(statusChecks.Contexts, defaultStatusCheck) {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tif strings.Contains(repo.GetName(), additionalStatusCheckContains) {\n\n\t\t\tif len(statusChecks.Contexts) != len(append(additionalStatusChecks, reviewContexts...)) {\n\t\t\t\tneedsUpdate = true\n\t\t\t\treviewContexts = append(additionalStatusChecks, reviewContexts...)\n\t\t\t} else {\n\t\t\t\tfor _, reviewContext := range additionalStatusChecks 
{\n\t\t\t\t\tif !contains(statusChecks.Contexts, reviewContext) {\n\t\t\t\t\t\tneedsUpdate = true\n\t\t\t\t\t\treviewContexts = append(additionalStatusChecks, reviewContexts...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif needsUpdate {\n\t\tprintln(\"Branch protection isn't correct, so updating it\")\n\n\t\tprotectionRequest := &github.ProtectionRequest{\n\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\tStrict: true,\n\t\t\t\tContexts: reviewContexts,\n\t\t\t},\n\t\t\tRequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{\n\t\t\t\tDismissStaleReviews: false,\n\t\t\t\tRequireCodeOwnerReviews: false,\n\t\t\t\tRequiredApprovingReviewCount: 1,\n\t\t\t},\n\t\t\tEnforceAdmins: false,\n\t\t}\n\t\tclient.Repositories.UpdateBranchProtection(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branchToProtect, protectionRequest)\n\t}\n}", "func (_Token *TokenCallerSession) IsWhitelisted(account common.Address) (bool, error) {\n\treturn _Token.Contract.IsWhitelisted(&_Token.CallOpts, account)\n}", "func (_StockToken *StockTokenCaller) IsWhitelisted(opts *bind.CallOpts, _address common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _StockToken.contract.Call(opts, out, \"isWhitelisted\", _address)\n\treturn *ret0, err\n}", "func (_IdentityRegistry *IdentityRegistryCaller) IsWhitelisted(opts *bind.CallOpts, _address common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _IdentityRegistry.contract.Call(opts, out, \"isWhitelisted\", _address)\n\treturn *ret0, err\n}", "func (b *Blacklist) IsAllowed(host string) bool {\n\tb.mutex.RLock()\n\tdefer b.mutex.RUnlock()\n\n\t_, ok := b.hosts[hash(host)]\n\tif ok {\n\t\treturn false\n\t}\n\n\tfor _, rx := range b.hostsRX {\n\t\tif rx.MatchString(host) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) {\n\tif err = repo.GetOwner(); err != nil {\n\t\treturn fmt.Errorf(\"GetOwner: %v\", err)\n\t} else if !repo.Owner.IsOrganization() {\n\t\treturn fmt.Errorf(\"expect repository owner to be an organization\")\n\t}\n\n\thasUsersChanged := false\n\tvalidUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, \",\"))\n\tif protectBranch.WhitelistUserIDs != whitelistUserIDs {\n\t\thasUsersChanged = true\n\t\tuserIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, \",\"))\n\t\tvalidUserIDs = make([]int64, 0, len(userIDs))\n\t\tfor _, userID := range userIDs {\n\t\t\tif !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite,\n\t\t\t\tAccessModeOptions{\n\t\t\t\t\tOwnerID: repo.OwnerID,\n\t\t\t\t\tPrivate: repo.IsPrivate,\n\t\t\t\t},\n\t\t\t) {\n\t\t\t\tcontinue // Drop invalid user ID\n\t\t\t}\n\n\t\t\tvalidUserIDs = append(validUserIDs, userID)\n\t\t}\n\n\t\tprotectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), \",\")\n\t}\n\n\thasTeamsChanged := false\n\tvalidTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, \",\"))\n\tif protectBranch.WhitelistTeamIDs != whitelistTeamIDs {\n\t\thasTeamsChanged = true\n\t\tteamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, \",\"))\n\t\tteams, err := GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v\", repo.OwnerID, repo.ID, 
err)\n\t\t}\n\t\tvalidTeamIDs = make([]int64, 0, len(teams))\n\t\tfor i := range teams {\n\t\t\tif teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) {\n\t\t\t\tvalidTeamIDs = append(validTeamIDs, teams[i].ID)\n\t\t\t}\n\t\t}\n\n\t\tprotectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), \",\")\n\t}\n\n\t// Make sure protectBranch.ID is not 0 for whitelists\n\tif protectBranch.ID == 0 {\n\t\tif _, err = x.Insert(protectBranch); err != nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\t// Merge users and members of teams\n\tvar whitelists []*ProtectBranchWhitelist\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tmergedUserIDs := make(map[int64]bool)\n\t\tfor _, userID := range validUserIDs {\n\t\t\t// Empty whitelist users can cause an ID with 0\n\t\t\tif userID != 0 {\n\t\t\t\tmergedUserIDs[userID] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, teamID := range validTeamIDs {\n\t\t\tmembers, err := GetTeamMembers(teamID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetTeamMembers [team_id: %d]: %v\", teamID, err)\n\t\t\t}\n\n\t\t\tfor i := range members {\n\t\t\t\tmergedUserIDs[members[i].ID] = true\n\t\t\t}\n\t\t}\n\n\t\twhitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs))\n\t\tfor userID := range mergedUserIDs {\n\t\t\twhitelists = append(whitelists, &ProtectBranchWhitelist{\n\t\t\t\tProtectBranchID: protectBranch.ID,\n\t\t\t\tRepoID: repo.ID,\n\t\t\t\tName: protectBranch.Name,\n\t\t\t\tUserID: userID,\n\t\t\t})\n\t\t}\n\t}\n\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\t// Refresh whitelists\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tif _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil {\n\t\t\treturn fmt.Errorf(\"delete old protect branch whitelists: %v\", err)\n\t\t} else if _, err = sess.Insert(whitelists); err != nil {\n\t\t\treturn fmt.Errorf(\"insert new protect branch whitelists: %v\", err)\n\t\t}\n\t}\n\n\treturn sess.Commit()\n}", "func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool {\n\tprotectBranch, err := GetProtectBranchOfRepoByName(repoID, name)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn protectBranch.Protected && protectBranch.RequirePullRequest\n}", "func IsUserAuthorizedForProjectTree(user *auth.User, projectSFID string) bool {\n\t// Previously, we checked for user.Admin - admins should be in a separate role\n\t// Previously, we checked for user.Allowed, which is currently not used (future flag that is currently not implemented)\n\treturn user.IsUserAuthorized(auth.Project, projectSFID, true)\n}", "func isWhitelisted(ip net.IP, whitelist []*net.IPNet) bool {\n\tfor _, network := range whitelist {\n\t\tif network.Contains(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (a *ACLs) IsAllowed(c context.Context, host, project string) (bool, error) {\n\t// Convert Gerrit to Git hosts.\n\tif strings.HasSuffix(host, \"-review.googlesource.com\") {\n\t\thost = strings.TrimSuffix(host, \"-review.googlesource.com\") + \".googlesource.com\"\n\t}\n\thacls, configured := a.hosts[host]\n\tif !configured {\n\t\treturn false, nil\n\t}\n\tif pacls, projKnown := hacls.projects[project]; projKnown {\n\t\treturn a.belongsTo(c, hacls.readers, pacls.readers)\n\t}\n\treturn a.belongsTo(c, hacls.readers)\n}", "func (k Keeper) 
IsUserBlocked(ctx sdk.Context, blocker, blocked, subspace string) bool {\n\treturn k.rk.HasUserBlocked(ctx, blocker, blocked, subspace)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetProtectBranchOfRepoByName returns a ProtectBranch by branch name in the given repository.
func GetProtectBranchOfRepoByName(repoID int64, name string) (*ProtectBranch, error) {
	protectBranch := &ProtectBranch{
		RepoID: repoID,
		Name:   name,
	}
	has, err := x.Get(protectBranch)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, ErrBranchNotExist{args: map[string]any{"name": name}}
	}
	return protectBranch, nil
}
[ "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (c *client) GetBranchProtection(org, repo, branch string) (*BranchProtection, error) {\n\tdurationLogger := c.log(\"GetBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\tcode, body, err := c.requestRaw(&request{\n\t\tmethod: http.MethodGet,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\t// GitHub returns 404 for this call if either:\n\t\t// - The branch is not protected\n\t\t// - The access token used does not have sufficient privileges\n\t\t// We therefore need to introspect the response body.\n\t\texitCodes: []int{200, 404},\n\t})\n\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase code == 200:\n\t\tvar bp BranchProtection\n\t\tif err := json.Unmarshal(body, &bp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &bp, nil\n\tcase code == 404:\n\t\t// continue\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected status code: %d\", code)\n\t}\n\n\tvar ge githubError\n\tif err := json.Unmarshal(body, &ge); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the error was because the branch is not protected, we return a\n\t// nil pointer to indicate this.\n\tif ge.Message == \"Branch not protected\" {\n\t\treturn nil, nil\n\t}\n\n\t// Otherwise we got some other 404 error.\n\treturn nil, fmt.Errorf(\"getting branch protection 404: %s\", ge.Message)\n}", "func GetRepoByName(name string)(*Repository,bool,error) {\n\tvar repo Repository\n\tres, err := MysqlDB.Table(\"ansible_repository\").Where(\"repo_name=?\", name).Get(&repo)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil,false,err\n\t}\n\treturn &repo,res,nil\n}", "func (g *github) GetBranchName() string { return g.branchName }", "func (novis *Novis) GetBranch(name string) *Branch {\n\treturn novis.Get(name)\n}", "func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {\n\tprotectBranches := make([]*ProtectBranch, 0, 2)\n\treturn protectBranches, x.Where(\"repo_id = ? 
and protected = ?\", repoID, true).Asc(\"name\").Find(&protectBranches)\n}", "func GetBranch(name string) *Branch {\n\treturn novis.Get(name)\n}", "func (w *Wiki) Branch(name string) (*Wiki, error) {\n\n\t// never checkout master in a linked repo\n\tif name == \"master\" {\n\t\treturn w, nil\n\t}\n\n\t// find branch\n\tif exist, err := w.hasBranch(name); !exist {\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, git.ErrBranchNotFound\n\t}\n\n\t// check out the branch in cache/branch/<name>;\n\t// if it already has been checked out, this does nothing\n\tdir, err := w.checkoutBranch(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create a new Wiki at this location\n\treturn NewWiki(dir)\n}", "func GetBranchProtection(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *BranchProtectionState, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tvar resource BranchProtection\n\terr := ctx.ReadResource(\"gitlab:index/branchProtection:BranchProtection\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *ProtectBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects/%s/repository/branches/%s/protect\", PathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPut, u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tb := new(Branch)\n\tresp, err := s.client.Do(req, b)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn b, resp, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (s *splicer) branch(name string) error {\n\treturn s.gitCall(\"checkout\", \"-B\", name, \"master\")\n}", "func checkBranchName(repo *models.Repository, name string) error {\n\tgitRepo, err := git.OpenRepository(repo.RepoPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer 
gitRepo.Close()\n\n\tbranches, _, err := GetBranches(repo, 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.Name == name {\n\t\t\treturn models.ErrBranchAlreadyExists{\n\t\t\t\tBranchName: branch.Name,\n\t\t\t}\n\t\t} else if (len(branch.Name) < len(name) && branch.Name+\"/\" == name[0:len(branch.Name)+1]) ||\n\t\t\t(len(branch.Name) > len(name) && name+\"/\" == branch.Name[0:len(name)+1]) {\n\t\t\treturn models.ErrBranchNameConflict{\n\t\t\t\tBranchName: branch.Name,\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, err := gitRepo.GetTag(name); err == nil {\n\t\treturn models.ErrTagAlreadyExists{\n\t\t\tTagName: name,\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *Repository) GitLabGetRepositoryByName(ctx context.Context, repositoryName string) (*repoModels.RepositoryDBModel, error) {\n\tcondition := expression.Key(repoModels.RepositoryNameColumn).Equal(expression.Value(repositoryName))\n\tfilter := expression.Name(repoModels.RepositoryTypeColumn).Equal(expression.Value(utils.GitLabLower))\n\trecord, err := r.getRepositoryWithConditionFilter(ctx, condition, filter, repoModels.RepositoryNameIndex)\n\tif err != nil {\n\t\t// Catch the error - return the same error with the appropriate details\n\t\tif _, ok := err.(*utils.GitLabRepositoryNotFound); ok {\n\t\t\treturn nil, &utils.GitLabRepositoryNotFound{\n\t\t\t\tRepositoryName: repositoryName,\n\t\t\t}\n\t\t}\n\t\t// Catch the error - return the same error with the appropriate details\n\t\tif _, ok := err.(*utils.GitLabDuplicateRepositoriesFound); ok {\n\t\t\treturn nil, &utils.GitLabDuplicateRepositoriesFound{\n\t\t\t\tRepositoryName: repositoryName,\n\t\t\t}\n\t\t}\n\t\t// Some other error\n\t\treturn nil, err\n\t}\n\n\treturn record, nil\n}", "func (g *github) GetRepoName() string { return g.repoName }", "func (s *BucketService) FindBucketByName(ctx context.Context, orgID influxdb.ID, n string) (*influxdb.Bucket, error) {\n\tspan, ctx := tracing.StartSpanFromContext(ctx)\n\tdefer span.Finish()\n\n\tb, err := s.s.FindBucketByName(ctx, orgID, n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := authorizeReadBucket(ctx, b.OrgID, b.ID); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn b, nil\n}", "func NewBranchProtection(ctx *pulumi.Context,\n\tname string, args *BranchProtectionArgs, opts ...pulumi.ResourceOption) (*BranchProtection, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Branch == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Branch'\")\n\t}\n\tif args.Project == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Project'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource BranchProtection\n\terr := ctx.RegisterResource(\"gitlab:index/branchProtection:BranchProtection\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (c *DeployServerConfig) BranchName() string {\n\treturn strings.Split(c.Ref, \"/\")[1]\n}", "func findBranch(brs []Branch, name string) *Branch {\n\tfor i, b := range brs {\n\t\tif b.Name == name {\n\t\t\treturn &(brs[i])\n\t\t}\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsBranchOfRepoRequirePullRequest returns true if the branch requires a pull request in the given repository.
func IsBranchOfRepoRequirePullRequest(repoID int64, name string) bool {
	protectBranch, err := GetProtectBranchOfRepoByName(repoID, name)
	if err != nil {
		return false
	}
	return protectBranch.Protected && protectBranch.RequirePullRequest
}
[ "func (p *PullRequest) IsForkPullRequest() bool {\n\treturn p.Head.RepoPath() != p.Base.RepoPath()\n}", "func ifPullRequest(issue *github.Issue) bool {\n\tif issue.PullRequestLinks != nil {\n\t\treturn true\n\t}\n\treturn false\n}", "func (o *PullRequestComment) HasPullrequest() bool {\n\tif o != nil && o.Pullrequest != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (c *Client) IsPullRequestMerged(owner, repo string, index int64) (bool, *Response, error) {\n\tif err := escapeValidatePathSegments(&owner, &repo); err != nil {\n\t\treturn false, nil, err\n\t}\n\tstatus, resp, err := c.getStatusCode(\"GET\", fmt.Sprintf(\"/repos/%s/%s/pulls/%d/merge\", owner, repo, index), nil, nil)\n\n\tif err != nil {\n\t\treturn false, resp, err\n\t}\n\n\treturn status == 204, resp, nil\n}", "func (config *GithubConfig) IsPRMergeable(pr *github.PullRequest) (bool, error) {\n\tif pr.Mergeable == nil {\n\t\tvar err error\n\t\tglog.Infof(\"Waiting for mergeability on %q %d\", *pr.Title, *pr.Number)\n\t\t// TODO: determine what a good empirical setting for this is.\n\t\ttime.Sleep(2 * time.Second)\n\t\tpr, err = config.GetPR(*pr.Number)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Unable to get PR# %d: %v\", *pr.Number, err)\n\t\t\treturn false, err\n\t\t}\n\t}\n\tif pr.Mergeable == nil {\n\t\terr := fmt.Errorf(\"No mergeability information for %q %d, Skipping.\", *pr.Title, *pr.Number)\n\t\tglog.Errorf(\"%v\", err)\n\t\treturn false, err\n\t}\n\tif !*pr.Mergeable {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n\n}", "func (r *Repo) IsGitHubRepo() bool { return strings.HasPrefix(r.URI, \"github.com/\") }", "func (a *Client) RepoPullRequestIsMerged(params *RepoPullRequestIsMergedParams, authInfo runtime.ClientAuthInfoWriter) (*RepoPullRequestIsMergedNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewRepoPullRequestIsMergedParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"repoPullRequestIsMerged\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/repos/{owner}/{repo}/pulls/{index}/merge\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &RepoPullRequestIsMergedReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*RepoPullRequestIsMergedNoContent), nil\n\n}", "func (cmd InspectCmd) RequiresRepo() bool {\n\treturn true\n}", "func (g *GithubClient) PullIsMergeable(repo models.Repo, pull models.PullRequest, vcsstatusname string) (bool, error) {\n\tgithubPR, err := g.GetPullRequest(repo, pull.Num)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"getting pull request\")\n\t}\n\tstate := githubPR.GetMergeableState()\n\t// We map our mergeable check to when the GitHub merge button is clickable.\n\t// This corresponds to the following states:\n\t// clean: No conflicts, all requirements satisfied.\n\t// Merging is allowed (green box).\n\t// unstable: Failing/pending commit status that is not part of the required\n\t// status checks. Merging is allowed (yellow box).\n\t// has_hooks: GitHub Enterprise only, if a repo has custom pre-receive\n\t// hooks. 
Merging is allowed (green box).\n\t// See: https://github.com/octokit/octokit.net/issues/1763\n\tif state != \"clean\" && state != \"unstable\" && state != \"has_hooks\" {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func (cmd ConfigCmd) RequiresRepo() bool {\n\treturn false\n}", "func (m *RKSyncMessage) IsChainPullRequestMsg() bool {\n\treturn m.GetStatePullRequest() != nil\n}", "func (b *Client) PullIsMergeable(repo models.Repo, pull models.PullRequest, vcsstatusname string) (bool, error) {\n\tnextPageURL := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d/diffstat\", b.BaseURL, repo.FullName, pull.Num)\n\t// We'll only loop 1000 times as a safety measure.\n\tmaxLoops := 1000\n\tfor i := 0; i < maxLoops; i++ {\n\t\tresp, err := b.makeRequest(\"GET\", nextPageURL, nil)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tvar diffStat DiffStat\n\t\tif err := json.Unmarshal(resp, &diffStat); err != nil {\n\t\t\treturn false, errors.Wrapf(err, \"Could not parse response %q\", string(resp))\n\t\t}\n\t\tif err := validator.New().Struct(diffStat); err != nil {\n\t\t\treturn false, errors.Wrapf(err, \"API response %q was missing fields\", string(resp))\n\t\t}\n\t\tfor _, v := range diffStat.Values {\n\t\t\t// These values are undocumented, found via manual testing.\n\t\t\tif *v.Status == \"merge conflict\" || *v.Status == \"local deleted\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\tif diffStat.Next == nil || *diffStat.Next == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tnextPageURL = *diffStat.Next\n\t}\n\treturn true, nil\n}", "func onlyGo(repo, branch, goBranch string) bool { return repo == \"go\" }", "func (b *Client) PullIsApproved(repo models.Repo, pull models.PullRequest) (approvalStatus models.ApprovalStatus, err error) {\n\tpath := fmt.Sprintf(\"%s/2.0/repositories/%s/pullrequests/%d\", b.BaseURL, repo.FullName, pull.Num)\n\tresp, err := b.makeRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn approvalStatus, err\n\t}\n\tvar pullResp PullRequest\n\tif err := json.Unmarshal(resp, &pullResp); err != nil {\n\t\treturn approvalStatus, errors.Wrapf(err, \"Could not parse response %q\", string(resp))\n\t}\n\tif err := validator.New().Struct(pullResp); err != nil {\n\t\treturn approvalStatus, errors.Wrapf(err, \"API response %q was missing fields\", string(resp))\n\t}\n\tauthorUUID := *pullResp.Author.UUID\n\tfor _, participant := range pullResp.Participants {\n\t\t// Bitbucket allows the author to approve their own pull request. 
This\n\t\t// defeats the purpose of approvals so we don't count that approval.\n\t\tif *participant.Approved && *participant.User.UUID != authorUUID {\n\t\t\treturn models.ApprovalStatus{\n\t\t\t\tIsApproved: true,\n\t\t\t}, nil\n\t\t}\n\t}\n\treturn approvalStatus, nil\n}", "func (c *client) IsMergeable(org, repo string, number int, SHA string) (bool, error) {\n\tbackoff := time.Second * 3\n\tmaxTries := 3\n\tfor try := 0; try < maxTries; try++ {\n\t\tpr, err := c.GetPullRequest(org, repo, number)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif pr.Head.SHA != SHA {\n\t\t\treturn false, fmt.Errorf(\"pull request head changed while checking mergeability (%s -> %s)\", SHA, pr.Head.SHA)\n\t\t}\n\t\tif pr.Merged {\n\t\t\treturn false, errors.New(\"pull request was merged while checking mergeability\")\n\t\t}\n\t\tif pr.Mergable != nil {\n\t\t\treturn *pr.Mergable, nil\n\t\t}\n\t\tif try+1 < maxTries {\n\t\t\tc.time.Sleep(backoff)\n\t\t\tbackoff *= 2\n\t\t}\n\t}\n\treturn false, fmt.Errorf(\"reached maximum number of retries (%d) checking mergeability\", maxTries)\n}", "func IsRepo() bool {\n\tout, err := Run(\"rev-parse\", \"--is-inside-work-tree\")\n\treturn err == nil && strings.TrimSpace(out) == \"true\"\n}", "func IsErrErrPullRequestHeadRepoMissing(err error) bool {\n\t_, ok := err.(ErrPullRequestHeadRepoMissing)\n\treturn ok\n}", "func (m *RKSyncMessage) IsStatePullRequestMsg() bool {\n\treturn m.GetStatePullRequest() != nil\n}", "func IsRepoSupported(spec *api.ExternalRepoSpec) bool {\n\t_, ok := SupportedExternalServices[spec.ServiceType]\n\treturn ok\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UpdateProtectBranch saves branch protection options. If ID is 0, it creates a new record; otherwise, it updates the existing record.
func UpdateProtectBranch(protectBranch *ProtectBranch) (err error) {
	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	if protectBranch.ID == 0 {
		if _, err = sess.Insert(protectBranch); err != nil {
			return fmt.Errorf("Insert: %v", err)
		}
	}

	if _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {
		return fmt.Errorf("Update: %v", err)
	}

	return sess.Commit()
}
[ "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil {\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil {\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil 
{\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: 
whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchBy\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (c *client) UpdateBranchProtection(org, repo, branch string, config BranchProtectionRequest) error {\n\tdurationLogger := c.log(\"UpdateBranchProtection\", org, repo, branch, config)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\taccept: \"application/vnd.github.luke-cage-preview+json\", // for required_approving_review_count\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\trequestBody: config,\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func UpdateBranchProtection() error {\n\tvar wg sync.WaitGroup\n\trequests, err := getBranchProtectionRequests()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(len(requests))\n\towner, repo := getOwnerRepo()\n\n\tfor _, bp := range requests {\n\t\tgo func(bp BranchProtection) {\n\t\t\tdefer wg.Done()\n\t\t\t_, _, err := cli.Repositories.UpdateBranchProtection(ctx, owner, repo, bp.Branch, bp.Protection)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintln(writer, fmt.Sprintf(\"branch %v has been protected\", bp.Branch))\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(bp)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (s *BranchesService) ProtectBranch(pid 
interface{}, branch string, opts *ProtectBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects/%s/repository/branches/%s/protect\", PathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPut, u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tb := new(Branch)\n\tresp, err := s.client.Do(req, b)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn b, resp, nil\n}", "func (t *TestBehaviour) RunUpdateProtectionBranch(msg string, param *models.GithubRepositoryBranchProtectionInput) {\n\t//it should be checking enforce admin if worked\n\t//it should be checking if all of the enabled checks are there and disabled are not there as well\n\tendpoint := fmt.Sprintf(\"%s/project/%s/github/repositories/%s/branch-protection\", t.apiURL, projectSFID, repositoryID)\n\tfrisby.Create(fmt.Sprintf(\"Update Protection Branch - %s - ProjectSFID : %s, RepositoryID: %s\", msg, projectSFID, repositoryID)).\n\t\tPost(endpoint).\n\t\tSetHeaders(t.getCLAProjectManagerHeaders()).\n\t\tSetJson(param).\n\t\tSend().\n\t\tExpectStatus(200).\n\t\tExpectJsonType(\"branch_name\", reflect.String).\n\t\tExpectJsonType(\"enforce_admin\", reflect.Bool).\n\t\tExpectJsonType(\"protection_enabled\", reflect.Bool).\n\t\tAfterText(func(F *frisby.Frisby, text string, err error) {\n\t\t\tvar response models.GithubRepositoryBranchProtection\n\t\t\tunmarshallErr := json.Unmarshal([]byte(text), &response)\n\t\t\tif unmarshallErr != nil {\n\t\t\t\tF.AddError(unmarshallErr.Error())\n\t\t\t}\n\t\t})\n\n\tt.RunGetProtectedBranch(&models.GithubRepositoryBranchProtection{\n\t\tBranchName: swag.String(\"master\"),\n\t\tEnforceAdmin: *param.EnforceAdmin,\n\t\tProtectionEnabled: true,\n\t\tStatusChecks: param.StatusChecks,\n\t})\n}", "func checkBranchProtection(repo *github.Repository) {\n\n\tneedsUpdate := false\n\treviewContexts := []string{defaultStatusCheck}\n\n\tprotection, response, err := client.Repositories.GetBranchProtection(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branchToProtect)\n\tif err != nil && response.StatusCode != 404 {\n\t\t// we don't care about 404 responses, as this just tells us it's not protected\n\t\tprintln(\"Could not check branch protection\", err.Error())\n\t\treturn\n\t}\n\n\tif response.StatusCode == 404 {\n\t\tneedsUpdate = true\n\t} else {\n\t\treviews := protection.GetRequiredPullRequestReviews()\n\t\tif reviews == nil || reviews.RequiredApprovingReviewCount != 1 {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tstatusChecks := protection.GetRequiredStatusChecks()\n\t\tif statusChecks == nil || statusChecks.Strict != true {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tif statusChecks == nil || !contains(statusChecks.Contexts, defaultStatusCheck) {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tif strings.Contains(repo.GetName(), additionalStatusCheckContains) {\n\n\t\t\tif len(statusChecks.Contexts) != len(append(additionalStatusChecks, reviewContexts...)) {\n\t\t\t\tneedsUpdate = true\n\t\t\t\treviewContexts = append(additionalStatusChecks, reviewContexts...)\n\t\t\t} else {\n\t\t\t\tfor _, reviewContext := range additionalStatusChecks {\n\t\t\t\t\tif !contains(statusChecks.Contexts, reviewContext) {\n\t\t\t\t\t\tneedsUpdate = true\n\t\t\t\t\t\treviewContexts = append(additionalStatusChecks, reviewContexts...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif needsUpdate {\n\t\tprintln(\"Branch 
protection isn't correct, so updating it\")\n\n\t\tprotectionRequest := &github.ProtectionRequest{\n\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\tStrict: true,\n\t\t\t\tContexts: reviewContexts,\n\t\t\t},\n\t\t\tRequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{\n\t\t\t\tDismissStaleReviews: false,\n\t\t\t\tRequireCodeOwnerReviews: false,\n\t\t\t\tRequiredApprovingReviewCount: 1,\n\t\t\t},\n\t\t\tEnforceAdmins: false,\n\t\t}\n\t\tclient.Repositories.UpdateBranchProtection(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branchToProtect, protectionRequest)\n\t}\n}", "func (mr *MockRepositoryClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) {\n\tif err = repo.GetOwner(); err != nil {\n\t\treturn fmt.Errorf(\"GetOwner: %v\", err)\n\t} else if !repo.Owner.IsOrganization() {\n\t\treturn fmt.Errorf(\"expect repository owner to be an organization\")\n\t}\n\n\thasUsersChanged := false\n\tvalidUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, \",\"))\n\tif protectBranch.WhitelistUserIDs != whitelistUserIDs {\n\t\thasUsersChanged = true\n\t\tuserIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, \",\"))\n\t\tvalidUserIDs = make([]int64, 0, len(userIDs))\n\t\tfor _, userID := range userIDs {\n\t\t\tif !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite,\n\t\t\t\tAccessModeOptions{\n\t\t\t\t\tOwnerID: repo.OwnerID,\n\t\t\t\t\tPrivate: repo.IsPrivate,\n\t\t\t\t},\n\t\t\t) {\n\t\t\t\tcontinue // Drop invalid user ID\n\t\t\t}\n\n\t\t\tvalidUserIDs = append(validUserIDs, userID)\n\t\t}\n\n\t\tprotectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), \",\")\n\t}\n\n\thasTeamsChanged := false\n\tvalidTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, \",\"))\n\tif protectBranch.WhitelistTeamIDs != whitelistTeamIDs {\n\t\thasTeamsChanged = true\n\t\tteamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, \",\"))\n\t\tteams, err := GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v\", repo.OwnerID, repo.ID, err)\n\t\t}\n\t\tvalidTeamIDs = make([]int64, 0, len(teams))\n\t\tfor i := range teams {\n\t\t\tif teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) {\n\t\t\t\tvalidTeamIDs = append(validTeamIDs, teams[i].ID)\n\t\t\t}\n\t\t}\n\n\t\tprotectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), \",\")\n\t}\n\n\t// Make sure protectBranch.ID is not 0 for whitelists\n\tif protectBranch.ID == 0 {\n\t\tif _, err = x.Insert(protectBranch); err != nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\t// Merge users and members of teams\n\tvar whitelists []*ProtectBranchWhitelist\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tmergedUserIDs := make(map[int64]bool)\n\t\tfor _, userID := range validUserIDs {\n\t\t\t// Empty whitelist users can cause an ID with 0\n\t\t\tif userID != 0 {\n\t\t\t\tmergedUserIDs[userID] = true\n\t\t\t}\n\t\t}\n\n\t\tfor _, teamID := range validTeamIDs {\n\t\t\tmembers, err := 
GetTeamMembers(teamID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetTeamMembers [team_id: %d]: %v\", teamID, err)\n\t\t\t}\n\n\t\t\tfor i := range members {\n\t\t\t\tmergedUserIDs[members[i].ID] = true\n\t\t\t}\n\t\t}\n\n\t\twhitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs))\n\t\tfor userID := range mergedUserIDs {\n\t\t\twhitelists = append(whitelists, &ProtectBranchWhitelist{\n\t\t\t\tProtectBranchID: protectBranch.ID,\n\t\t\t\tRepoID: repo.ID,\n\t\t\t\tName: protectBranch.Name,\n\t\t\t\tUserID: userID,\n\t\t\t})\n\t\t}\n\t}\n\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\t// Refresh whitelists\n\tif hasUsersChanged || hasTeamsChanged {\n\t\tif _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil {\n\t\t\treturn fmt.Errorf(\"delete old protect branch whitelists: %v\", err)\n\t\t} else if _, err = sess.Insert(whitelists); err != nil {\n\t\t\treturn fmt.Errorf(\"insert new protect branch whitelists: %v\", err)\n\t\t}\n\t}\n\n\treturn sess.Commit()\n}", "func setBranchProtections(ctx context.Context, client *github.Client, repoName, branchStrategy string) error {\n\trequiredStatusChecks := github.RequiredStatusChecks{\n\t\tStrict: true,\n\t\tContexts: []string{},\n\t}\n\n\tdismissalRestrictionsRequest := github.DismissalRestrictionsRequest{\n\t\tUsers: &[]string{},\n\t\tTeams: &[]string{dpTeamSlug},\n\t}\n\trequiredPullRequestReviewsEnforcementRequest := github.PullRequestReviewsEnforcementRequest{\n\t\tDismissalRestrictionsRequest: &dismissalRestrictionsRequest,\n\t\tDismissStaleReviews: true,\n\t\tRequireCodeOwnerReviews: true,\n\t\tRequiredApprovingReviewCount: 1,\n\t}\n\n\tbranchRestrictions := github.BranchRestrictionsRequest{\n\t\tUsers: []string{},\n\t\tTeams: []string{dpTeamSlug},\n\t}\n\n\tprotectionRequest := github.ProtectionRequest{\n\t\tRequiredStatusChecks: &requiredStatusChecks,\n\t\tRequiredPullRequestReviews: &requiredPullRequestReviewsEnforcementRequest,\n\t\tEnforceAdmins: true,\n\t\tRestrictions: &branchRestrictions,\n\t}\n\t_, _, err := client.Repositories.UpdateBranchProtection(ctx, org, repoName, \"main\", &protectionRequest)\n\tif err != nil {\n\t\tlog.Error(ctx, \"update branch protection failed for main\", err)\n\t\treturn err\n\t}\n\n\tif branchStrategy == \"git\" {\n\t\t_, _, err = client.Repositories.UpdateBranchProtection(ctx, org, repoName, \"develop\", &protectionRequest)\n\t\tif err != nil {\n\t\t\tlog.Error(ctx, \"update branch protection failed for develop\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\tvar resp *github.Response\n\t_, resp, err = client.Repositories.RequireSignaturesOnProtectedBranch(ctx, org, repoName, \"main\")\n\tif err != nil {\n\t\tlog.Error(ctx, \"adding protection, require signatures failed on branch main\", err, log.Data{\"response\": resp})\n\t\treturn err\n\t}\n\tif branchStrategy == \"git\" {\n\t\t_, resp, err = client.Repositories.RequireSignaturesOnProtectedBranch(ctx, org, repoName, \"develop\")\n\t\tif err != nil {\n\t\t\tlog.Error(ctx, \"adding protection, require signatures failed on branch develop\", err, log.Data{\"response\": resp})\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func (mr *MockClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func (m *MockClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (p GithubRepoHost) AddBranchProtection(repoID string) (BranchProtectionRule, error) {\n\tif isDebug() {\n\t\tfmt.Printf(\"Adding branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.CreateBranchProtectionRuleInput{\n\t\tRepositoryID: repoID,\n\t\tPattern: *githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t}\n\n\tchecks := make([]githubv4.String, len(rules.RequiredStatusCheckContexts))\n\tfor i, name := range rules.RequiredStatusCheckContexts {\n\t\tchecks[i] = *githubv4.NewString(githubv4.String(name))\n\t}\n\tinput.RequiredStatusCheckContexts = &checks\n\n\tvar m CreateRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn m.CreateBranchProtectionRule.BranchProtectionRule, err\n}", "func (m *MockRepositoryClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := 
git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = &git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: 
form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func CreateBranchProtection(c *CreateScaffoldCommand) (err error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tc.Config.Logger.Info(\"Creating branch protection request\", \"branch\", c.Config.Base)\n\tstrict := true\n\tprotectionRequest := &github.ProtectionRequest{\n\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\tStrict: false, Contexts: []string{},\n\t\t},\n\t\tRequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{\n\t\t\tDismissStaleReviews: strict, RequireCodeOwnerReviews: strict, RequiredApprovingReviewCount: 3,\n\t\t},\n\t\tEnforceAdmins: strict,\n\t\tRequireLinearHistory: &strict,\n\t\tAllowForcePushes: &strict,\n\t\tAllowDeletions: &strict,\n\t}\n\n\tc.Config.Logger.Info(\"Applying branch protection\", \"branch\", c.Config.Base)\n\t_, _, err = c.GithubClient.GetRepo().UpdateBranchProtection(ctx, c.Config.Owner, c.Config.Repo, c.Config.Base, protectionRequest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create the base branch protection: %w\", err)\n\t}\n\n\treturn 
nil\n}", "func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects/%s/repository/branches/%s/unprotect\", PathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPut, u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tb := new(Branch)\n\tresp, err := s.client.Do(req, b)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn b, resp, nil\n}", "func (c *client) RemoveBranchProtection(org, repo, branch string) error {\n\tdurationLogger := c.log(\"RemoveBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func DeleteBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation DELETE /repos/{owner}/{repo}/branch_protections/{name} repository repoDeleteBranchProtection\n\t// ---\n\t// summary: Delete a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"204\":\n\t// \"$ref\": \"#/responses/empty\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif err := git_model.DeleteProtectedBranch(ctx, ctx.Repo.Repository.ID, bp.ID); err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"DeleteProtectedBranch\", err)\n\t\treturn\n\t}\n\n\tctx.Status(http.StatusNoContent)\n}", "func (m *MarkerIndexBranchIDMapping) SetBranchID(index markers.Index, branchID ledgerstate.BranchID) {\n\tm.mappingMutex.Lock()\n\tdefer m.mappingMutex.Unlock()\n\n\tm.mapping.Set(index, branchID)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UpdateOrgProtectBranch saves the branch protection options of an organizational repository. If ID is 0, it creates a new record; otherwise, it updates the existing record. The function also checks whether the whitelisted user and team IDs have changed, to avoid unnecessarily deleting and regenerating the whitelist.
func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whitelistUserIDs, whitelistTeamIDs string) (err error) {
	if err = repo.GetOwner(); err != nil {
		return fmt.Errorf("GetOwner: %v", err)
	} else if !repo.Owner.IsOrganization() {
		return fmt.Errorf("expect repository owner to be an organization")
	}

	hasUsersChanged := false
	validUserIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistUserIDs, ","))
	if protectBranch.WhitelistUserIDs != whitelistUserIDs {
		hasUsersChanged = true
		userIDs := tool.StringsToInt64s(strings.Split(whitelistUserIDs, ","))
		validUserIDs = make([]int64, 0, len(userIDs))
		for _, userID := range userIDs {
			if !Perms.Authorize(context.TODO(), userID, repo.ID, AccessModeWrite,
				AccessModeOptions{
					OwnerID: repo.OwnerID,
					Private: repo.IsPrivate,
				},
			) {
				continue // Drop invalid user ID
			}

			validUserIDs = append(validUserIDs, userID)
		}

		protectBranch.WhitelistUserIDs = strings.Join(tool.Int64sToStrings(validUserIDs), ",")
	}

	hasTeamsChanged := false
	validTeamIDs := tool.StringsToInt64s(strings.Split(protectBranch.WhitelistTeamIDs, ","))
	if protectBranch.WhitelistTeamIDs != whitelistTeamIDs {
		hasTeamsChanged = true
		teamIDs := tool.StringsToInt64s(strings.Split(whitelistTeamIDs, ","))
		teams, err := GetTeamsHaveAccessToRepo(repo.OwnerID, repo.ID, AccessModeWrite)
		if err != nil {
			return fmt.Errorf("GetTeamsHaveAccessToRepo [org_id: %d, repo_id: %d]: %v", repo.OwnerID, repo.ID, err)
		}
		validTeamIDs = make([]int64, 0, len(teams))
		for i := range teams {
			if teams[i].HasWriteAccess() && com.IsSliceContainsInt64(teamIDs, teams[i].ID) {
				validTeamIDs = append(validTeamIDs, teams[i].ID)
			}
		}

		protectBranch.WhitelistTeamIDs = strings.Join(tool.Int64sToStrings(validTeamIDs), ",")
	}

	// Make sure protectBranch.ID is not 0 for whitelists
	if protectBranch.ID == 0 {
		if _, err = x.Insert(protectBranch); err != nil {
			return fmt.Errorf("Insert: %v", err)
		}
	}

	// Merge users and members of teams
	var whitelists []*ProtectBranchWhitelist
	if hasUsersChanged || hasTeamsChanged {
		mergedUserIDs := make(map[int64]bool)
		for _, userID := range validUserIDs {
			// Empty whitelist users can cause an ID with 0
			if userID != 0 {
				mergedUserIDs[userID] = true
			}
		}

		for _, teamID := range validTeamIDs {
			members, err := GetTeamMembers(teamID)
			if err != nil {
				return fmt.Errorf("GetTeamMembers [team_id: %d]: %v", teamID, err)
			}

			for i := range members {
				mergedUserIDs[members[i].ID] = true
			}
		}

		whitelists = make([]*ProtectBranchWhitelist, 0, len(mergedUserIDs))
		for userID := range mergedUserIDs {
			whitelists = append(whitelists, &ProtectBranchWhitelist{
				ProtectBranchID: protectBranch.ID,
				RepoID:          repo.ID,
				Name:            protectBranch.Name,
				UserID:          userID,
			})
		}
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	if _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {
		return fmt.Errorf("Update: %v", err)
	}

	// Refresh whitelists
	if hasUsersChanged || hasTeamsChanged {
		if _, err = sess.Delete(&ProtectBranchWhitelist{ProtectBranchID: protectBranch.ID}); err != nil {
			return fmt.Errorf("delete old protect branch whitelists: %v", err)
		} else if _, err = sess.Insert(whitelists); err != nil {
			return fmt.Errorf("insert new protect branch whitelists: %v", err)
		}
	}

	return sess.Commit()
}
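A minimal caller sketch for the document above, not part of the original dataset row. The wrapper name and the `Protected` field are assumptions (the field is inferred from the `protected` column queried elsewhere in this package); `RepoID`, `Name`, `tool.Int64sToStrings`, and `strings.Join` are taken directly from the function body and its whitelist struct.

// applyOrgBranchProtection is a hypothetical wrapper showing one way to
// drive UpdateOrgProtectBranch. The whitelist arguments are passed in the
// same comma-separated string format the function itself stores.
func applyOrgBranchProtection(repo *Repository, branch string, userIDs, teamIDs []int64) error {
	protect := &ProtectBranch{
		RepoID:    repo.ID,
		Name:      branch,
		Protected: true, // assumed field, inferred from the "protected" column
	}
	// With ID == 0, UpdateOrgProtectBranch inserts the record first, then
	// regenerates the merged user/team whitelist inside one transaction.
	return UpdateOrgProtectBranch(repo, protect,
		strings.Join(tool.Int64sToStrings(userIDs), ","),
		strings.Join(tool.Int64sToStrings(teamIDs), ","))
}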
[ "func EditBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation PATCH /repos/{owner}/{repo}/branch_protections/{name} repository repoEditBranchProtection\n\t// ---\n\t// summary: Edit a branch protections for a repository. Only fields that are set will be changed\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/EditBranchProtectionOption\"\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\tform := web.GetForm(ctx).(*api.EditBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif protectBranch == nil || protectBranch.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tif form.EnablePush != nil {\n\t\tif !*form.EnablePush {\n\t\t\tprotectBranch.CanPush = false\n\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t} else {\n\t\t\tprotectBranch.CanPush = true\n\t\t\tif form.EnablePushWhitelist != nil {\n\t\t\t\tif !*form.EnablePushWhitelist {\n\t\t\t\t\tprotectBranch.EnableWhitelist = false\n\t\t\t\t\tprotectBranch.WhitelistDeployKeys = false\n\t\t\t\t} else {\n\t\t\t\t\tprotectBranch.EnableWhitelist = true\n\t\t\t\t\tif form.PushWhitelistDeployKeys != nil {\n\t\t\t\t\t\tprotectBranch.WhitelistDeployKeys = *form.PushWhitelistDeployKeys\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif form.EnableMergeWhitelist != nil {\n\t\tprotectBranch.EnableMergeWhitelist = *form.EnableMergeWhitelist\n\t}\n\n\tif form.EnableStatusCheck != nil {\n\t\tprotectBranch.EnableStatusCheck = *form.EnableStatusCheck\n\t}\n\n\tif form.StatusCheckContexts != nil {\n\t\tprotectBranch.StatusCheckContexts = form.StatusCheckContexts\n\t}\n\n\tif form.RequiredApprovals != nil && *form.RequiredApprovals >= 0 {\n\t\tprotectBranch.RequiredApprovals = *form.RequiredApprovals\n\t}\n\n\tif form.EnableApprovalsWhitelist != nil {\n\t\tprotectBranch.EnableApprovalsWhitelist = *form.EnableApprovalsWhitelist\n\t}\n\n\tif form.BlockOnRejectedReviews != nil {\n\t\tprotectBranch.BlockOnRejectedReviews = *form.BlockOnRejectedReviews\n\t}\n\n\tif form.BlockOnOfficialReviewRequests != nil {\n\t\tprotectBranch.BlockOnOfficialReviewRequests = *form.BlockOnOfficialReviewRequests\n\t}\n\n\tif form.DismissStaleApprovals != nil {\n\t\tprotectBranch.DismissStaleApprovals = *form.DismissStaleApprovals\n\t}\n\n\tif form.RequireSignedCommits != nil {\n\t\tprotectBranch.RequireSignedCommits = *form.RequireSignedCommits\n\t}\n\n\tif form.ProtectedFilePatterns != nil {\n\t\tprotectBranch.ProtectedFilePatterns = *form.ProtectedFilePatterns\n\t}\n\n\tif form.UnprotectedFilePatterns != nil {\n\t\tprotectBranch.UnprotectedFilePatterns = *form.UnprotectedFilePatterns\n\t}\n\n\tif form.BlockOnOutdatedBranch != nil 
{\n\t\tprotectBranch.BlockOnOutdatedBranch = *form.BlockOnOutdatedBranch\n\t}\n\n\tvar whitelistUsers []int64\n\tif form.PushWhitelistUsernames != nil {\n\t\twhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twhitelistUsers = protectBranch.WhitelistUserIDs\n\t}\n\tvar mergeWhitelistUsers []int64\n\tif form.MergeWhitelistUsernames != nil {\n\t\tmergeWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tmergeWhitelistUsers = protectBranch.MergeWhitelistUserIDs\n\t}\n\tvar approvalsWhitelistUsers []int64\n\tif form.ApprovalsWhitelistUsernames != nil {\n\t\tapprovalsWhitelistUsers, err = user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tapprovalsWhitelistUsers = protectBranch.ApprovalsWhitelistUserIDs\n\t}\n\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\tif form.PushWhitelistTeams != nil {\n\t\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\twhitelistTeams = protectBranch.WhitelistTeamIDs\n\t\t}\n\t\tif form.MergeWhitelistTeams != nil {\n\t\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tmergeWhitelistTeams = protectBranch.MergeWhitelistTeamIDs\n\t\t}\n\t\tif form.ApprovalsWhitelistTeams != nil {\n\t\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\t\tif err != nil {\n\t\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tapprovalsWhitelistTeams = protectBranch.ApprovalsWhitelistTeamIDs\n\t\t}\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: 
whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tisPlainRule := !git_model.IsRuleNameSpecial(bpName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), bpName)\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, bpName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, protectBranch.RuleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPrsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to ensure get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchBy\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func UpdateProtectBranch(protectBranch *ProtectBranch) (err error) {\n\tsess := x.NewSession()\n\tdefer sess.Close()\n\tif err = sess.Begin(); err != nil {\n\t\treturn err\n\t}\n\n\tif protectBranch.ID == 0 {\n\t\tif _, err = sess.Insert(protectBranch); err != nil {\n\t\t\treturn fmt.Errorf(\"Insert: %v\", err)\n\t\t}\n\t}\n\n\tif _, err = sess.ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {\n\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t}\n\n\treturn sess.Commit()\n}", "func (c *client) UpdateBranchProtection(org, repo, branch string, config BranchProtectionRequest) error {\n\tdurationLogger := c.log(\"UpdateBranchProtection\", org, repo, branch, config)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\taccept: \"application/vnd.github.luke-cage-preview+json\", // for required_approving_review_count\n\t\tmethod: http.MethodPut,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\trequestBody: config,\n\t\texitCodes: []int{200},\n\t}, nil)\n\treturn err\n}", "func UpdateBranchProtection() error {\n\tvar wg sync.WaitGroup\n\trequests, err := getBranchProtectionRequests()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Add(len(requests))\n\towner, repo := getOwnerRepo()\n\n\tfor _, bp := range 
requests {\n\t\tgo func(bp BranchProtection) {\n\t\t\tdefer wg.Done()\n\t\t\t_, _, err := cli.Repositories.UpdateBranchProtection(ctx, owner, repo, bp.Branch, bp.Protection)\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\t_, err = fmt.Fprintln(writer, fmt.Sprintf(\"branch %v has been protected\", bp.Branch))\n\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(bp)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}", "func (t *TestBehaviour) RunUpdateProtectionBranch(msg string, param *models.GithubRepositoryBranchProtectionInput) {\n\t//it should be checking enforce admin if worked\n\t//it should be checking if all of the enabled checks are there and disabled are not there as well\n\tendpoint := fmt.Sprintf(\"%s/project/%s/github/repositories/%s/branch-protection\", t.apiURL, projectSFID, repositoryID)\n\tfrisby.Create(fmt.Sprintf(\"Update Protection Branch - %s - ProjectSFID : %s, RepositoryID: %s\", msg, projectSFID, repositoryID)).\n\t\tPost(endpoint).\n\t\tSetHeaders(t.getCLAProjectManagerHeaders()).\n\t\tSetJson(param).\n\t\tSend().\n\t\tExpectStatus(200).\n\t\tExpectJsonType(\"branch_name\", reflect.String).\n\t\tExpectJsonType(\"enforce_admin\", reflect.Bool).\n\t\tExpectJsonType(\"protection_enabled\", reflect.Bool).\n\t\tAfterText(func(F *frisby.Frisby, text string, err error) {\n\t\t\tvar response models.GithubRepositoryBranchProtection\n\t\t\tunmarshallErr := json.Unmarshal([]byte(text), &response)\n\t\t\tif unmarshallErr != nil {\n\t\t\t\tF.AddError(unmarshallErr.Error())\n\t\t\t}\n\t\t})\n\n\tt.RunGetProtectedBranch(&models.GithubRepositoryBranchProtection{\n\t\tBranchName: swag.String(\"master\"),\n\t\tEnforceAdmin: *param.EnforceAdmin,\n\t\tProtectionEnabled: true,\n\t\tStatusChecks: param.StatusChecks,\n\t})\n}", "func (mr *MockRepositoryClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func checkBranchProtection(repo *github.Repository) {\n\n\tneedsUpdate := false\n\treviewContexts := []string{defaultStatusCheck}\n\n\tprotection, response, err := client.Repositories.GetBranchProtection(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branchToProtect)\n\tif err != nil && response.StatusCode != 404 {\n\t\t// we don't care about 404 responses, as this just tells us it's not protected\n\t\tprintln(\"Could not check branch protection\", err.Error())\n\t\treturn\n\t}\n\n\tif response.StatusCode == 404 {\n\t\tneedsUpdate = true\n\t} else {\n\t\treviews := protection.GetRequiredPullRequestReviews()\n\t\tif reviews == nil || reviews.RequiredApprovingReviewCount != 1 {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tstatusChecks := protection.GetRequiredStatusChecks()\n\t\tif statusChecks == nil || statusChecks.Strict != true {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tif statusChecks == nil || !contains(statusChecks.Contexts, defaultStatusCheck) {\n\t\t\tneedsUpdate = true\n\t\t}\n\n\t\tif strings.Contains(repo.GetName(), additionalStatusCheckContains) {\n\n\t\t\tif len(statusChecks.Contexts) != len(append(additionalStatusChecks, reviewContexts...)) {\n\t\t\t\tneedsUpdate = true\n\t\t\t\treviewContexts = append(additionalStatusChecks, reviewContexts...)\n\t\t\t} else {\n\t\t\t\tfor _, reviewContext := range additionalStatusChecks {\n\t\t\t\t\tif !contains(statusChecks.Contexts, reviewContext) 
{\n\t\t\t\t\t\tneedsUpdate = true\n\t\t\t\t\t\treviewContexts = append(additionalStatusChecks, reviewContexts...)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif needsUpdate {\n\t\tprintln(\"Branch protection isn't correct, so updating it\")\n\n\t\tprotectionRequest := &github.ProtectionRequest{\n\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\tStrict: true,\n\t\t\t\tContexts: reviewContexts,\n\t\t\t},\n\t\t\tRequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{\n\t\t\t\tDismissStaleReviews: false,\n\t\t\t\tRequireCodeOwnerReviews: false,\n\t\t\t\tRequiredApprovingReviewCount: 1,\n\t\t\t},\n\t\t\tEnforceAdmins: false,\n\t\t}\n\t\tclient.Repositories.UpdateBranchProtection(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branchToProtect, protectionRequest)\n\t}\n}", "func (mr *MockClientMockRecorder) UpdateBranchProtection(org, repo, branch, config interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"UpdateBranchProtection\", reflect.TypeOf((*MockClient)(nil).UpdateBranchProtection), org, repo, branch, config)\n}", "func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *ProtectBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) {\n\tproject, err := parseID(pid)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tu := fmt.Sprintf(\"projects/%s/repository/branches/%s/protect\", PathEscape(project), url.PathEscape(branch))\n\n\treq, err := s.client.NewRequest(http.MethodPut, u, opts, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tb := new(Branch)\n\tresp, err := s.client.Do(req, b)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn b, resp, nil\n}", "func setBranchProtections(ctx context.Context, client *github.Client, repoName, branchStrategy string) error {\n\trequiredStatusChecks := github.RequiredStatusChecks{\n\t\tStrict: true,\n\t\tContexts: []string{},\n\t}\n\n\tdismissalRestrictionsRequest := github.DismissalRestrictionsRequest{\n\t\tUsers: &[]string{},\n\t\tTeams: &[]string{dpTeamSlug},\n\t}\n\trequiredPullRequestReviewsEnforcementRequest := github.PullRequestReviewsEnforcementRequest{\n\t\tDismissalRestrictionsRequest: &dismissalRestrictionsRequest,\n\t\tDismissStaleReviews: true,\n\t\tRequireCodeOwnerReviews: true,\n\t\tRequiredApprovingReviewCount: 1,\n\t}\n\n\tbranchRestrictions := github.BranchRestrictionsRequest{\n\t\tUsers: []string{},\n\t\tTeams: []string{dpTeamSlug},\n\t}\n\n\tprotectionRequest := github.ProtectionRequest{\n\t\tRequiredStatusChecks: &requiredStatusChecks,\n\t\tRequiredPullRequestReviews: &requiredPullRequestReviewsEnforcementRequest,\n\t\tEnforceAdmins: true,\n\t\tRestrictions: &branchRestrictions,\n\t}\n\t_, _, err := client.Repositories.UpdateBranchProtection(ctx, org, repoName, \"main\", &protectionRequest)\n\tif err != nil {\n\t\tlog.Error(ctx, \"update branch protection failed for main\", err)\n\t\treturn err\n\t}\n\n\tif branchStrategy == \"git\" {\n\t\t_, _, err = client.Repositories.UpdateBranchProtection(ctx, org, repoName, \"develop\", &protectionRequest)\n\t\tif err != nil {\n\t\t\tlog.Error(ctx, \"update branch protection failed for develop\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\tvar resp *github.Response\n\t_, resp, err = client.Repositories.RequireSignaturesOnProtectedBranch(ctx, org, repoName, \"main\")\n\tif err != nil {\n\t\tlog.Error(ctx, \"adding protection, require signatures failed on branch main\", err, log.Data{\"response\": 
resp})\n\t\treturn err\n\t}\n\tif branchStrategy == \"git\" {\n\t\t_, resp, err = client.Repositories.RequireSignaturesOnProtectedBranch(ctx, org, repoName, \"develop\")\n\t\tif err != nil {\n\t\t\tlog.Error(ctx, \"adding protection, require signatures failed on branch develop\", err, log.Data{\"response\": resp})\n\t\t\treturn err\n\t\t}\n\t}\n\treturn err\n}", "func (m *MockClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (m *MockRepositoryClient) UpdateBranchProtection(org, repo, branch string, config github.BranchProtectionRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"UpdateBranchProtection\", org, repo, branch, config)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func (p GithubRepoHost) AddBranchProtection(repoID string) (BranchProtectionRule, error) {\n\tif isDebug() {\n\t\tfmt.Printf(\"Adding branch protection on %s\\n\", repoID)\n\t}\n\n\trules := fetchBranchProtectionRules()\n\tinput := githubv4.CreateBranchProtectionRuleInput{\n\t\tRepositoryID: repoID,\n\t\tPattern: *githubv4.NewString(githubv4.String(rules.Pattern)),\n\t\tDismissesStaleReviews: githubv4.NewBoolean(githubv4.Boolean(rules.DismissesStaleReviews)),\n\t\tIsAdminEnforced: githubv4.NewBoolean(githubv4.Boolean(rules.IsAdminEnforced)),\n\t\tRequiresApprovingReviews: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresApprovingReviews)),\n\t\tRequiredApprovingReviewCount: githubv4.NewInt(githubv4.Int(rules.RequiredApprovingReviewCount)),\n\t\tRequiresStatusChecks: githubv4.NewBoolean(githubv4.Boolean(rules.RequiresStatusChecks)),\n\t}\n\n\tchecks := make([]githubv4.String, len(rules.RequiredStatusCheckContexts))\n\tfor i, name := range rules.RequiredStatusCheckContexts {\n\t\tchecks[i] = *githubv4.NewString(githubv4.String(name))\n\t}\n\tinput.RequiredStatusCheckContexts = &checks\n\n\tvar m CreateRuleMutation\n\tclient := buildClient()\n\terr := client.Mutate(context.Background(), &m, input, nil)\n\treturn m.CreateBranchProtectionRule.BranchProtectionRule, err\n}", "func CreateBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation POST /repos/{owner}/{repo}/branch_protections repository repoCreateBranchProtection\n\t// ---\n\t// summary: Create a branch protections for a repository\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: body\n\t// in: body\n\t// schema:\n\t// \"$ref\": \"#/definitions/CreateBranchProtectionOption\"\n\t// responses:\n\t// \"201\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"403\":\n\t// \"$ref\": \"#/responses/forbidden\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\t// \"422\":\n\t// \"$ref\": \"#/responses/validationError\"\n\n\tform := web.GetForm(ctx).(*api.CreateBranchProtectionOption)\n\trepo := ctx.Repo.Repository\n\n\truleName := form.RuleName\n\tif ruleName == \"\" {\n\t\truleName = form.BranchName //nolint\n\t}\n\tif len(ruleName) == 0 {\n\t\tctx.Error(http.StatusBadRequest, \"both rule_name and branch_name are empty\", \"both rule_name and branch_name are empty\")\n\t\treturn\n\t}\n\n\tisPlainRule := 
!git_model.IsRuleNameSpecial(ruleName)\n\tvar isBranchExist bool\n\tif isPlainRule {\n\t\tisBranchExist = git.IsBranchExist(ctx.Req.Context(), ctx.Repo.Repository.RepoPath(), ruleName)\n\t}\n\n\tprotectBranch, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectBranchOfRepoByName\", err)\n\t\treturn\n\t} else if protectBranch != nil {\n\t\tctx.Error(http.StatusForbidden, \"Create branch protection\", \"Branch protection already exist\")\n\t\treturn\n\t}\n\n\tvar requiredApprovals int64\n\tif form.RequiredApprovals > 0 {\n\t\trequiredApprovals = form.RequiredApprovals\n\t}\n\n\twhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.PushWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tmergeWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.MergeWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tapprovalsWhitelistUsers, err := user_model.GetUserIDsByNames(ctx, form.ApprovalsWhitelistUsernames, false)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\tctx.Error(http.StatusUnprocessableEntity, \"User does not exist\", err)\n\t\t\treturn\n\t\t}\n\t\tctx.Error(http.StatusInternalServerError, \"GetUserIDsByNames\", err)\n\t\treturn\n\t}\n\tvar whitelistTeams, mergeWhitelistTeams, approvalsWhitelistTeams []int64\n\tif repo.Owner.IsOrganization() {\n\t\twhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.PushWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tmergeWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.MergeWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t\tapprovalsWhitelistTeams, err = organization.GetTeamIDsByNames(repo.OwnerID, form.ApprovalsWhitelistTeams, false)\n\t\tif err != nil {\n\t\t\tif organization.IsErrTeamNotExist(err) {\n\t\t\t\tctx.Error(http.StatusUnprocessableEntity, \"Team does not exist\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetTeamIDsByNames\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tprotectBranch = &git_model.ProtectedBranch{\n\t\tRepoID: ctx.Repo.Repository.ID,\n\t\tRuleName: ruleName,\n\t\tCanPush: form.EnablePush,\n\t\tEnableWhitelist: form.EnablePush && form.EnablePushWhitelist,\n\t\tEnableMergeWhitelist: form.EnableMergeWhitelist,\n\t\tWhitelistDeployKeys: form.EnablePush && form.EnablePushWhitelist && form.PushWhitelistDeployKeys,\n\t\tEnableStatusCheck: form.EnableStatusCheck,\n\t\tStatusCheckContexts: form.StatusCheckContexts,\n\t\tEnableApprovalsWhitelist: 
form.EnableApprovalsWhitelist,\n\t\tRequiredApprovals: requiredApprovals,\n\t\tBlockOnRejectedReviews: form.BlockOnRejectedReviews,\n\t\tBlockOnOfficialReviewRequests: form.BlockOnOfficialReviewRequests,\n\t\tDismissStaleApprovals: form.DismissStaleApprovals,\n\t\tRequireSignedCommits: form.RequireSignedCommits,\n\t\tProtectedFilePatterns: form.ProtectedFilePatterns,\n\t\tUnprotectedFilePatterns: form.UnprotectedFilePatterns,\n\t\tBlockOnOutdatedBranch: form.BlockOnOutdatedBranch,\n\t}\n\n\terr = git_model.UpdateProtectBranch(ctx, ctx.Repo.Repository, protectBranch, git_model.WhitelistOptions{\n\t\tUserIDs: whitelistUsers,\n\t\tTeamIDs: whitelistTeams,\n\t\tMergeUserIDs: mergeWhitelistUsers,\n\t\tMergeTeamIDs: mergeWhitelistTeams,\n\t\tApprovalsUserIDs: approvalsWhitelistUsers,\n\t\tApprovalsTeamIDs: approvalsWhitelistTeams,\n\t})\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"UpdateProtectBranch\", err)\n\t\treturn\n\t}\n\n\tif isBranchExist {\n\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, ruleName); err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif !isPlainRule {\n\t\t\tif ctx.Repo.GitRepo == nil {\n\t\t\t\tctx.Repo.GitRepo, err = git.OpenRepository(ctx, ctx.Repo.Repository.RepoPath())\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"OpenRepository\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tctx.Repo.GitRepo.Close()\n\t\t\t\t\tctx.Repo.GitRepo = nil\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// FIXME: since we only need to recheck files protected rules, we could improve this\n\t\t\tmatchedBranches, err := git_model.FindAllMatchedBranches(ctx, ctx.Repo.Repository.ID, ruleName)\n\t\t\tif err != nil {\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"FindAllMatchedBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, branchName := range matchedBranches {\n\t\t\t\tif err = pull_service.CheckPRsForBaseBranch(ctx, ctx.Repo.Repository, branchName); err != nil {\n\t\t\t\t\tctx.Error(http.StatusInternalServerError, \"CheckPRsForBaseBranch\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Reload from db to get all whitelists\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, ctx.Repo.Repository.ID, ruleName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != ctx.Repo.Repository.ID {\n\t\tctx.Error(http.StatusInternalServerError, \"New branch protection not found\", err)\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusCreated, convert.ToBranchProtection(bp))\n}", "func (db *DB) UpdateApplicationApproval(\n\tctx context.Context,\n\tappID int,\n\tstatus bool,\n\treason string,\n) error {\n\n\treturn nil\n}", "func (c *client) RemoveBranchProtection(org, repo, branch string) error {\n\tdurationLogger := c.log(\"RemoveBranchProtection\", org, repo, branch)\n\tdefer durationLogger()\n\n\t_, err := c.request(&request{\n\t\tmethod: http.MethodDelete,\n\t\tpath: fmt.Sprintf(\"/repos/%s/%s/branches/%s/protection\", org, repo, branch),\n\t\torg: org,\n\t\texitCodes: []int{204},\n\t}, nil)\n\treturn err\n}", "func (r *SettingRepository) AddBranchByOrgID(orgID string) (branch models.Branch, err error) {\n\tobjID := bson.NewObjectId()\n\tbranch.ID = objID\n\tbranch.OrgID = bson.ObjectIdHex(orgID)\n\tbranch.Status = \"Active\"\n\tbranch.CreatedAt = time.Now()\n\tbranch.UpdatedAt = time.Now()\n\n\terr = 
r.C.Insert(&branch)\n\treturn\n}", "func CreateBranchProtection(c *CreateScaffoldCommand) (err error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tc.Config.Logger.Info(\"Creating branch protection request\", \"branch\", c.Config.Base)\n\tstrict := true\n\tprotectionRequest := &github.ProtectionRequest{\n\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\tStrict: false, Contexts: []string{},\n\t\t},\n\t\tRequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{\n\t\t\tDismissStaleReviews: strict, RequireCodeOwnerReviews: strict, RequiredApprovingReviewCount: 3,\n\t\t},\n\t\tEnforceAdmins: strict,\n\t\tRequireLinearHistory: &strict,\n\t\tAllowForcePushes: &strict,\n\t\tAllowDeletions: &strict,\n\t}\n\n\tc.Config.Logger.Info(\"Applying branch protection\", \"branch\", c.Config.Base)\n\t_, _, err = c.GithubClient.GetRepo().UpdateBranchProtection(ctx, c.Config.Owner, c.Config.Repo, c.Config.Base, protectionRequest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create the base branch protection: %w\", err)\n\t}\n\n\treturn nil\n}", "func (mr *MockRepositoryClientMockRecorder) RemoveBranchProtection(org, repo, branch interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RemoveBranchProtection\", reflect.TypeOf((*MockRepositoryClient)(nil).RemoveBranchProtection), org, repo, branch)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetProtectBranchesByRepoID returns the list of ProtectBranch records in the given repository.
func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {
	protectBranches := make([]*ProtectBranch, 0, 2)
	return protectBranches, x.Where("repo_id = ? and protected = ?", repoID, true).Asc("name").Find(&protectBranches)
}
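A short consumption sketch for the document above, again illustrative rather than part of the dataset row. The helper name is hypothetical; only the function signature and the `Name` field shown in this section are assumed to exist.

// listProtectedBranchNames is a hypothetical helper in the same package,
// showing how GetProtectBranchesByRepoID might be consumed.
func listProtectedBranchNames(repoID int64) ([]string, error) {
	protects, err := GetProtectBranchesByRepoID(repoID)
	if err != nil {
		return nil, fmt.Errorf("GetProtectBranchesByRepoID [repo_id: %d]: %v", repoID, err)
	}
	names := make([]string, 0, len(protects))
	for _, pb := range protects {
		names = append(names, pb.Name)
	}
	return names, nil
}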
[ "func (c *client) GetBranches(org, repo string, onlyProtected bool) ([]Branch, error) {\n\tdurationLogger := c.log(\"GetBranches\", org, repo, onlyProtected)\n\tdefer durationLogger()\n\n\tvar branches []Branch\n\terr := c.readPaginatedResultsWithValues(\n\t\tfmt.Sprintf(\"/repos/%s/%s/branches\", org, repo),\n\t\turl.Values{\n\t\t\t\"protected\": []string{strconv.FormatBool(onlyProtected)},\n\t\t\t\"per_page\": []string{\"100\"},\n\t\t},\n\t\tacceptNone,\n\t\torg,\n\t\tfunc() interface{} { // newObj\n\t\t\treturn &[]Branch{}\n\t\t},\n\t\tfunc(obj interface{}) {\n\t\t\tbranches = append(branches, *(obj.(*[]Branch))...)\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn branches, nil\n}", "func (r *SettingRepository) GetBranchesByOrgID(orgID string) []models.Branch {\n\tvar branches []models.Branch\n\torgid := bson.ObjectIdHex(orgID)\n\titer := r.C.Find(bson.M{\"orgid\": orgid}).Iter()\n\tresult := models.Branch{}\n\tfor iter.Next(&result) {\n\t\tbranches = append(branches, result)\n\t}\n\treturn branches\n}", "func ListBranchProtections(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections repository repoListBranchProtection\n\t// ---\n\t// summary: List branch protections for a repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtectionList\"\n\n\trepo := ctx.Repo.Repository\n\tbps, err := git_model.FindRepoProtectedBranchRules(ctx, repo.ID)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranches\", err)\n\t\treturn\n\t}\n\tapiBps := make([]*api.BranchProtection, len(bps))\n\tfor i := range bps {\n\t\tapiBps[i] = convert.ToBranchProtection(bps[i])\n\t}\n\n\tctx.JSON(http.StatusOK, apiBps)\n}", "func (c *Client) ListRepoBranches(user, repo string) ([]*Branch, error) {\n\tbranches := make([]*Branch, 0, 10)\n\treturn branches, c.getParsedResponse(\"GET\", fmt.Sprintf(\"/repos/%s/%s/branches\", user, repo), nil, nil, &branches)\n}", "func GetBranches(repo *models.Repository, skip, limit int) ([]*git.Branch, int, error) {\n\treturn git.GetBranchesByPath(repo.RepoPath(), skip, limit)\n}", "func (g *GitLab) Branches(ctx context.Context, user *model.User, repo *model.Repo, p *model.ListOptions) ([]string, error) {\n\ttoken := common.UserToken(ctx, repo, user)\n\tclient, err := newClient(g.url, token, g.SkipVerify)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_repo, err := g.getProject(ctx, client, repo.Owner, repo.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgitlabBranches, _, err := client.Branches.ListBranches(_repo.ID,\n\t\t&gitlab.ListBranchesOptions{ListOptions: gitlab.ListOptions{Page: p.Page, PerPage: p.PerPage}},\n\t\tgitlab.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbranches := make([]string, 0)\n\tfor _, branch := range gitlabBranches {\n\t\tbranches = append(branches, branch.Name)\n\t}\n\treturn branches, nil\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tbranches, resp, err := client.Repositories.ListBranches(ctx, *repo.Owner.Login, *repo.Name, opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden 
{\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif branch.GetName() == \"master\" && in(orgs, *repo.Owner.Login) {\n\t\t\t// we must get the individual branch for the branch protection to work\n\t\t\tb, _, err := client.Repositories.GetBranch(ctx, *repo.Owner.Login, *repo.Name, branch.GetName())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// return early if it is already protected\n\t\t\tif b.GetProtected() {\n\t\t\t\tfmt.Printf(\"[OK] %s:%s is already protected\\n\", *repo.FullName, b.GetName())\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfmt.Printf(\"[UPDATE] %s:%s will be changed to protected\\n\", *repo.FullName, b.GetName())\n\t\t\tif dryrun {\n\t\t\t\t// return early\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// set the branch to be protected\n\t\t\tif _, _, err := client.Repositories.UpdateBranchProtection(ctx, *repo.Owner.Login, *repo.Name, b.GetName(), &github.ProtectionRequest{\n\t\t\t\tRequiredStatusChecks: &github.RequiredStatusChecks{\n\t\t\t\t\tStrict: false,\n\t\t\t\t\tContexts: []string{},\n\t\t\t\t},\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func GetBranches(token, owner, repo string) ([]*github.Branch, error) {\n\tclient, ctx := getOauthClient(token)\n\tres, _, err := client.Repositories.ListBranches(ctx, owner, repo, nil)\n\tif res == nil || err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func GetBranches(remoteRepository string) ([]BranchInformation, error) {\n\tdir, err := ioutil.TempDir(\"\", \"gitbranch\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir) // clean up\n\n\tif _, err := execCommand(\"git init\", dir); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := execCommand(\"git remote add origin \"+remoteRepository, dir); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar branches []string\n\tif branches, err = execCommand(\"git ls-remote | awk '{print $2}' | grep refs/heads | cut -c 12-\", dir); err != nil {\n\t\treturn nil, err\n\t}\n\tvar result []BranchInformation\n\n\tfor _, branch := range branches {\n\t\tif !strings.HasSuffix(branch, \"^{}\") {\n\t\t\tresult = append(result, BranchInformation{Enabled: true, Key: branch, Value: branch, Image: \"https://raw.githubusercontent.com/sascha-andres/gitbranch/develop/icons/branch.svg\"})\n\t\t}\n\t}\n\n\tif branches, err = execCommand(\"git ls-remote | awk '{print $2}' | grep refs/tags | cut -c 11-\", dir); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, branch := range branches {\n\t\tif !strings.HasSuffix(branch, \"^{}\") {\n\t\t\tresult = append(result, BranchInformation{Enabled: true, Key: branch, Value: branch, Image: \"https://raw.githubusercontent.com/sascha-andres/gitbranch/develop/icons/bookmark.svg\"})\n\t\t}\n\t}\n\n\treturn result, nil\n}", "func (repo *GitRepoRepository) ListGitReposByProjectID(\n\tprojectID uint,\n) ([]*models.GitRepo, error) {\n\tgrs := []*models.GitRepo{}\n\n\tif err := repo.db.Where(\"project_id = ?\", projectID).Find(&grs).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn grs, nil\n}", "func ListBranches(repoName string) []string {\n\tlogger := plgo.NewNoticeLogger(\"konfigraf: \", log.Ltime)\n\trequire(logger, repoName, \"Repository name\")\n\n\tdb, err := plgo.Open()\n\tif err != nil {\n\t\tlogger.Fatalf(\"Cannot open DB: %s\", err)\n\t}\n\tdefer db.Close()\n\tdatabase := newProxy(db)\n\n\tbranches, err := service.GetBranches(database, repoName)\n\n\tif err != nil {\n\t\tlogger.Fatalf(\"Error: %s\", 
err)\n\t}\n\n\treturn branches\n}", "func GetBranchProtection(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branch_protections/{name} repository repoGetBranchProtection\n\t// ---\n\t// summary: Get a specific branch protection for the repository\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: name\n\t// in: path\n\t// description: name of protected branch\n\t// type: string\n\t// required: true\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchProtection\"\n\t// \"404\":\n\t// \"$ref\": \"#/responses/notFound\"\n\n\trepo := ctx.Repo.Repository\n\tbpName := ctx.Params(\":name\")\n\tbp, err := git_model.GetProtectedBranchRuleByName(ctx, repo.ID, bpName)\n\tif err != nil {\n\t\tctx.Error(http.StatusInternalServerError, \"GetProtectedBranchByID\", err)\n\t\treturn\n\t}\n\tif bp == nil || bp.RepoID != repo.ID {\n\t\tctx.NotFound()\n\t\treturn\n\t}\n\n\tctx.JSON(http.StatusOK, convert.ToBranchProtection(bp))\n}", "func (a *Client) RepoListBranches(params *RepoListBranchesParams, authInfo runtime.ClientAuthInfoWriter) (*RepoListBranchesOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewRepoListBranchesParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"repoListBranches\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/repos/{owner}/{repo}/branches\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"text/plain\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &RepoListBranchesReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*RepoListBranchesOK), nil\n\n}", "func (client *Client) Branches(issueID, repositoryType string) (branches []*Branch, err error) {\n\tpath := fmt.Sprintf(\"/rest/dev-status/latest/issue/detail?issueId=%s&applicationType=%s&dataType=branch\", issueID, repositoryType)\n\tres, err := client.getRequest(path, http.StatusOK)\n\tif err != nil {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches failed request: %w\", err)\n\t}\n\tdefer res.Body.Close()\n\n\tvar result DevStatus\n\tbodyBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches failed to read response body %w\", err)\n\t}\n\n\tif err := json.Unmarshal(bodyBytes, &result); err != nil {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches failed unmarshal response body: %w\", err)\n\t}\n\n\tif len(result.Errors) > 0 {\n\t\treturn []*Branch{}, fmt.Errorf(\"Branches found unexpected errors: %s\", result.Errors)\n\t}\n\tbranches = make([]*Branch, 0)\n\tfor _, detail := range result.Details {\n\t\tbranches = append(branches, detail.Branches...)\n\t}\n\treturn branches, nil\n}", "func (o GetProjectProtectedBranchesResultOutput) ProtectedBranches() GetProjectProtectedBranchesProtectedBranchArrayOutput {\n\treturn o.ApplyT(func(v GetProjectProtectedBranchesResult) []GetProjectProtectedBranchesProtectedBranch {\n\t\treturn v.ProtectedBranches\n\t}).(GetProjectProtectedBranchesProtectedBranchArrayOutput)\n}", "func (server *RepositoriesService) ListBranches(ctx context.Context, project string, 
repo string, opt *ListOpts) (*Branches, *http.Response, error) {\n\tu := fmt.Sprintf(\"rest/api/1.0/projects/%s/repos/%s/branches\", project, repo)\n\treq, err := server.v1Client.NewRequest(http.MethodGet, u, nil, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar branches Branches\n\tresp, err := server.v1Client.Do(req, &branches)\n\tlog.Infof(\"branch: %+v, error: %+v\", branches, err)\n\treturn &branches, resp, err\n}", "func (m *MockRepositoryClient) GetBranches(org, repo string, onlyProtected bool) ([]github.Branch, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetBranches\", org, repo, onlyProtected)\n\tret0, _ := ret[0].([]github.Branch)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func ListBranches(ctx *context.APIContext) {\n\t// swagger:operation GET /repos/{owner}/{repo}/branches repository repoListBranches\n\t// ---\n\t// summary: List a repository's branches\n\t// produces:\n\t// - application/json\n\t// parameters:\n\t// - name: owner\n\t// in: path\n\t// description: owner of the repo\n\t// type: string\n\t// required: true\n\t// - name: repo\n\t// in: path\n\t// description: name of the repo\n\t// type: string\n\t// required: true\n\t// - name: page\n\t// in: query\n\t// description: page number of results to return (1-based)\n\t// type: integer\n\t// - name: limit\n\t// in: query\n\t// description: page size of results\n\t// type: integer\n\t// responses:\n\t// \"200\":\n\t// \"$ref\": \"#/responses/BranchList\"\n\n\tvar totalNumOfBranches int64\n\tvar apiBranches []*api.Branch\n\n\tlistOptions := utils.GetListOptions(ctx)\n\n\tif !ctx.Repo.Repository.IsEmpty {\n\t\tif ctx.Repo.GitRepo == nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"Load git repository failed\", nil)\n\t\t\treturn\n\t\t}\n\n\t\tbranchOpts := git_model.FindBranchOptions{\n\t\t\tListOptions: listOptions,\n\t\t\tRepoID: ctx.Repo.Repository.ID,\n\t\t\tIsDeletedBranch: util.OptionalBoolFalse,\n\t\t}\n\t\tvar err error\n\t\ttotalNumOfBranches, err = git_model.CountBranches(ctx, branchOpts)\n\t\tif err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"CountBranches\", err)\n\t\t\treturn\n\t\t}\n\t\tif totalNumOfBranches == 0 { // sync branches immediately because non-empty repository should have at least 1 branch\n\t\t\ttotalNumOfBranches, err = repo_module.SyncRepoBranches(ctx, ctx.Repo.Repository.ID, 0)\n\t\t\tif err != nil {\n\t\t\t\tctx.ServerError(\"SyncRepoBranches\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\trules, err := git_model.FindRepoProtectedBranchRules(ctx, ctx.Repo.Repository.ID)\n\t\tif err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"FindMatchedProtectedBranchRules\", err)\n\t\t\treturn\n\t\t}\n\n\t\tbranches, err := git_model.FindBranches(ctx, branchOpts)\n\t\tif err != nil {\n\t\t\tctx.Error(http.StatusInternalServerError, \"GetBranches\", err)\n\t\t\treturn\n\t\t}\n\n\t\tapiBranches = make([]*api.Branch, 0, len(branches))\n\t\tfor i := range branches {\n\t\t\tc, err := ctx.Repo.GitRepo.GetBranchCommit(branches[i].Name)\n\t\t\tif err != nil {\n\t\t\t\t// Skip if this branch doesn't exist anymore.\n\t\t\t\tif git.IsErrNotExist(err) {\n\t\t\t\t\ttotalNumOfBranches--\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"GetCommit\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tbranchProtection := rules.GetFirstMatched(branches[i].Name)\n\t\t\tapiBranch, err := convert.ToBranch(ctx, ctx.Repo.Repository, branches[i].Name, c, branchProtection, ctx.Doer, ctx.Repo.IsAdmin())\n\t\t\tif err != nil 
{\n\t\t\t\tctx.Error(http.StatusInternalServerError, \"convert.ToBranch\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tapiBranches = append(apiBranches, apiBranch)\n\t\t}\n\t}\n\n\tctx.SetLinkHeader(int(totalNumOfBranches), listOptions.PageSize)\n\tctx.SetTotalCountHeader(totalNumOfBranches)\n\tctx.JSON(http.StatusOK, apiBranches)\n}", "func (c *Client) BranchesContaining(ctx context.Context, db database.DB, repositoryID int, commit string) ([]string, error) {\n\trepo, err := c.repositoryIDToRepo(ctx, repositoryID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn gitserver.NewClient(db).BranchesContaining(ctx, repo, api.CommitID(commit), authz.DefaultSubRepoPermsChecker)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
QuickExec quickly executes a simple command line
func QuickExec(cmdLine string, workDir ...string) (string, error) { return ExecLine(cmdLine, workDir...) }
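A minimal usage sketch for the QuickExec document above. The import path is an assumption (the helper ships in a cliutil-style package) and is flagged as such in the comments; everything else follows the signature shown.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gookit/goutil/cliutil" // assumed import path for QuickExec
)

func main() {
	// Run a one-liner in the current working directory.
	out, err := cliutil.QuickExec("go version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out)

	// The optional variadic argument sets the working directory.
	out, err = cliutil.QuickExec("ls -la", "/tmp")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out)
}
```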
[ "func Exec(cmd string) {\n\n\tfmt.Printf(\"Você digitou: %s \", cmd)\n\n}", "func Setup(c *exec.Cmd) {}", "func ExecBuiltin(args []string) {\n\tif len(args) <= 0 {\n\t\tPanic(\"No parameters\")\n\t}\n\n\t//TODO: Loadings\n\tswitch args[0] {\n\tcase \"Error\":\n\t\tError(strings.Join(args[1:], \" \"))\n\tcase \"Warn\":\n\t\tWarn(strings.Join(args[1:], \" \"))\n\tcase \"Info\":\n\t\tInfo(strings.Join(args[1:], \" \"))\n\tcase \"Made\":\n\t\tMade(strings.Join(args[1:], \" \"))\n\tcase \"Ask\":\n\t\tif noColor {\n\t\t\tfmt.Print(\"[?] \")\n\t\t} else {\n\t\t\tfmt.Print(\"\\033[38;5;99;01m[?]\\033[00m \")\n\t\t}\n\t\tfmt.Println(strings.Join(args[1:], \" \"))\n\tcase \"AskYN\":\n\t\tif AskYN(strings.Join(args[1:], \" \")) {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tos.Exit(1)\n\tcase \"Read\":\n\t\treader := bufio.NewReader(os.Stdin)\n\t\ttext, _ := reader.ReadString('\\n')\n\t\tfmt.Print(text)\n\tcase \"ReadSecure\":\n\t\tfmt.Print(ReadSecure())\n\tcase \"AskList\":\n\t\tvalues := \"\"\n\t\tdflt := -1\n\n\t\tif len(args) >= 3 {\n\t\t\tvalues = args[2]\n\t\t\tif len(args) >= 4 {\n\t\t\t\tif i, err := strconv.Atoi(args[3]); err == nil {\n\t\t\t\t\tdflt = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tos.Exit(AskList(strings.Split(values, \",\"), dflt, args[1]))\n\tcase \"Bell\":\n\t\tBell()\n\t}\n\tos.Exit(0)\n}", "func exec(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\tr := R\n\tvar in io.Reader\n\tif L != nil {\n\t\tr = L\n\t\tc, ok := R.(apl.Channel)\n\t\tif ok {\n\t\t\tin = bufio.NewReader(apl.NewChannelReader(a, c))\n\t\t} else {\n\t\t\tin = strings.NewReader(R.String(a))\n\t\t}\n\t}\n\n\tv, ok := domain.ToStringArray(nil).To(a, r)\n\tif ok == false {\n\t\treturn nil, fmt.Errorf(\"io exec: argv must be strings: %T\", r)\n\t}\n\targv := v.(apl.StringArray).Strings\n\tif len(argv) == 0 {\n\t\treturn nil, fmt.Errorf(\"io exec: argv empty\")\n\t}\n\n\t// If the command starts with a slash, we may relocate it.\n\tif strings.HasPrefix(argv[0], \"/\") {\n\t\tfsys, mpt, err := lookup(argv[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif f, ok := fsys.(fs); ok == false {\n\t\t\treturn nil, fmt.Errorf(\"exec: %s: file system is not an os fs: %s\", argv[0], fsys.String())\n\t\t} else {\n\t\t\trelpath := strings.TrimPrefix(argv[0], mpt)\n\t\t\targv[0] = f.path(relpath)\n\t\t}\n\t}\n\n\tcmd := ex.Command(argv[0], argv[1:]...)\n\tcmd.Stdin = in\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := apl.LineReader(out)\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func main() {\n\n\tif err := qml.Run(run); err != nil {\n\t\tfmt.Fprint(os.Stderr, \"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}", "func main() {\n\tif err := cmd.Cmd.Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}", "func SimpleExec(name string, args ...string) (string, error) {\n\tTrace(name, args...)\n\treturn Output(ExecCommand(name, args...))\n}", "func SimpleExec(name string, args ...string) (string, error) {\n\treturn Output(ExecCommand(name, args...))\n}", "func (c cli) exec(cmd string, args ...string) (string, error) {\n\tallArgs := append([]string{cmd}, args...)\n\treturn execCmd(\"lightning-cli\", allArgs...)\n}", "func main() {\n\texecute.Execute()\n}", "func startQuickQuit(i *app.Indicator) {\n\ti.AddQuick(\"QUIT\", qQuit, func(args ...interface{}) {\n\t\ti := args[0].(*app.Indicator)\n\t\ti.Quit()\n\t}, i)\n}", "func SimpleExec(cmdName string, arguments ...string) {\n\tcmd := exec.Command(cmdName, arguments...) 
// nolint: gosec\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func startQuickLiqoWebsite(i *app.Indicator) {\n\ti.AddQuick(\"ⓘ About Liqo\", qWeb, func(args ...interface{}) {\n\t\tcmd := exec.Command(\"xdg-open\", \"http://liqo.io\")\n\t\t_ = cmd.Run()\n\t})\n}", "func Exec(name string, args ...string) error {\n\treturn syscall.Exec(name, args, os.Environ())\n}", "func ExampleExecFile() {\n\tconst data = `\nprint(greeting + \", world\")\nprint(repeat(\"one\"))\nprint(repeat(\"mur\", 2))\nsquares = [x*x for x in range(10)]\n`\n\n\t// repeat(str, n=1) is a Go function called from Starlark.\n\t// It behaves like the 'string * int' operation.\n\trepeat := func(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {\n\t\tvar s string\n\t\tvar n int = 1\n\t\tif err := starlark.UnpackArgs(b.Name(), args, kwargs, \"s\", &s, \"n?\", &n); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn starlark.String(strings.Repeat(s, n)), nil\n\t}\n\n\t// The Thread defines the behavior of the built-in 'print' function.\n\tthread := &starlark.Thread{\n\t\tName: \"example\",\n\t\tPrint: func(_ *starlark.Thread, msg string) { fmt.Println(msg) },\n\t}\n\n\t// This dictionary defines the pre-declared environment.\n\tpredeclared := starlark.StringDict{\n\t\t\"greeting\": starlark.String(\"hello\"),\n\t\t\"repeat\": starlark.NewBuiltin(\"repeat\", repeat),\n\t}\n\n\t// Execute a program.\n\tglobals, err := starlark.ExecFile(thread, \"apparent/filename.star\", data, predeclared)\n\tif err != nil {\n\t\tif evalErr, ok := err.(*starlark.EvalError); ok {\n\t\t\tlog.Fatal(evalErr.Backtrace())\n\t\t}\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print the global environment.\n\tfmt.Println(\"\\nGlobals:\")\n\tfor _, name := range globals.Keys() {\n\t\tv := globals[name]\n\t\tfmt.Printf(\"%s (%s) = %s\\n\", name, v.Type(), v.String())\n\t}\n\n\t// Output:\n\t// hello, world\n\t// one\n\t// murmur\n\t//\n\t// Globals:\n\t// squares (list) = [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n}", "func (h *Howdoi) Execute() {\n\tflag.Parse()\n\n\tif h.ShowHelp {\n\t\tfmt.Println(help)\n\t\tos.Exit(0)\n\t}\n\n\tif h.ShowVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\t// position must be > 0\n\tif h.Position == 0 {\n\t\th.Position = 1\n\t}\n\n\terr := h.sanitizeQuestion(flag.Args())\n\tif err != nil {\n\t\tfmt.Println(help)\n\t\tos.Exit(1)\n\t}\n\n\tlinks, err := h.getLinks()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tanswer, err := h.getAnswer(links)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(answer)\n}", "func main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tapp := cliapp.NewApp()\n\tapp.Version = \"1.0.3\"\n\tapp.Description = \"this is my cli application\"\n\n\tapp.SetVerbose(cliapp.VerbDebug)\n\t// app.DefaultCmd(\"exampl\")\n\n\tapp.Add(cmd.GitCommand())\n\t// app.Add(cmd.ColorCommand())\n\tapp.Add(builtin.GenShAutoComplete())\n\t// fmt.Printf(\"%+v\\n\", cliapp.CommandNames())\n\tapp.Run()\n}", "func ExecPrompt() {\n\tcfg.HasShell = true\n\tcfg.PrintHeader()\n\tshell := prompt.New(\n\t\tfunc(in string) {\n\t\t\tif err := ExecLine(in); err != nil {\n\t\t\t\tfmt.Println(\"🙈 Error:\", err)\n\t\t\t}\n\t\t},\n\t\tcompleter,\n\t\tprompt.OptionHistory(readHistory()),\n\t\tprompt.OptionTitle(\"cloudmonkey\"),\n\t\tprompt.OptionPrefix(cfg.GetPrompt()),\n\t\tprompt.OptionLivePrefix(func() (string, bool) {\n\t\t\treturn cfg.GetPrompt(), 
true\n\t\t}),\n\t\tprompt.OptionMaxSuggestion(5),\n\t\tprompt.OptionPrefixTextColor(prompt.DefaultColor),\n\t\tprompt.OptionPreviewSuggestionTextColor(prompt.DarkBlue),\n\t\tprompt.OptionSelectedSuggestionTextColor(prompt.White),\n\t\tprompt.OptionSelectedSuggestionBGColor(prompt.DarkBlue),\n\t\tprompt.OptionSelectedDescriptionTextColor(prompt.White),\n\t\tprompt.OptionSelectedDescriptionBGColor(prompt.DarkGray),\n\t\tprompt.OptionSuggestionTextColor(prompt.Black),\n\t\tprompt.OptionSuggestionBGColor(prompt.White),\n\t\tprompt.OptionDescriptionTextColor(prompt.Black),\n\t\tprompt.OptionDescriptionBGColor(prompt.LightGray),\n\t\tprompt.OptionScrollbarThumbColor(prompt.DarkBlue),\n\t\tprompt.OptionScrollbarBGColor(prompt.LightGray),\n\t)\n\tshell.Run()\n}", "func Console(args ...string) {\n cfg.StartCmd = \"/bin/bash -c\"\n cfg.QuotedOpts = \"'\" + cfg.Console + \"'\"\n runInteractive(\"run\", settingsToParams(0, false)...)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ExecLine quickly executes a command line string
func ExecLine(cmdLine string, workDir ...string) (string, error) { p := cmdline.NewParser(cmdLine) // create a new Cmd instance cmd := p.NewExecCmd() if len(workDir) > 0 { cmd.Dir = workDir[0] } bs, err := cmd.Output() return string(bs), err }
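ExecLine is the workhorse behind QuickExec: it hands the raw string to a cmdline parser rather than a shell. A hedged sketch follows, assuming the same cliutil import path and that the parser honors double-quoted arguments.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gookit/goutil/cliutil" // assumed import path for ExecLine
)

func main() {
	// The cmdline parser splits the string into argv itself, so quoted
	// arguments survive without any shell being involved.
	out, err := cliutil.ExecLine(`echo "hello world"`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out) // hello world
}
```

Because no shell runs, metacharacters such as `|` or `*` are passed to the program verbatim; for those, see ShellExec below.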
[ "func QuickExec(cmdLine string, workDir ...string) (string, error) {\n\treturn ExecLine(cmdLine, workDir...)\n}", "func exec(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\tr := R\n\tvar in io.Reader\n\tif L != nil {\n\t\tr = L\n\t\tc, ok := R.(apl.Channel)\n\t\tif ok {\n\t\t\tin = bufio.NewReader(apl.NewChannelReader(a, c))\n\t\t} else {\n\t\t\tin = strings.NewReader(R.String(a))\n\t\t}\n\t}\n\n\tv, ok := domain.ToStringArray(nil).To(a, r)\n\tif ok == false {\n\t\treturn nil, fmt.Errorf(\"io exec: argv must be strings: %T\", r)\n\t}\n\targv := v.(apl.StringArray).Strings\n\tif len(argv) == 0 {\n\t\treturn nil, fmt.Errorf(\"io exec: argv empty\")\n\t}\n\n\t// If the command starts with a slash, we may relocate it.\n\tif strings.HasPrefix(argv[0], \"/\") {\n\t\tfsys, mpt, err := lookup(argv[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif f, ok := fsys.(fs); ok == false {\n\t\t\treturn nil, fmt.Errorf(\"exec: %s: file system is not an os fs: %s\", argv[0], fsys.String())\n\t\t} else {\n\t\t\trelpath := strings.TrimPrefix(argv[0], mpt)\n\t\t\targv[0] = f.path(relpath)\n\t\t}\n\t}\n\n\tcmd := ex.Command(argv[0], argv[1:]...)\n\tcmd.Stdin = in\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := apl.LineReader(out)\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func (ui *UI) exec(ctx context.Context, line string, reqCh chan execReq) int {\n\treq := execReq{\n\t\tctx: ctx,\n\t\tline: line,\n\t\tui: ui,\n\t\trespCh: make(chan int),\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0\n\tcase reqCh <- req:\n\t}\n\treturn <-req.respCh\n}", "func (cl *Client) ExecString(cmd string, args ...interface{}) (string, error) {\n\tvar s string\n\terr := cl.Conn(func(c *Conn) error {\n\t\tvar err error\n\t\ts, err = c.ExecString(cmd, args...)\n\t\treturn err\n\t})\n\treturn s, err\n}", "func Line(cmd *exec.Cmd) string {\n\treturn strings.Join(cmd.Args, \" \")\n}", "func execCmdline(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tpprof.Cmdline(args[0].(http.ResponseWriter), args[1].(*http.Request))\n}", "func WrapExec(cmd string, args []String, nArg uint32) (status syscall.Status){\n\n\n\tpath := \"/programs/\"+cmd\n\n\tif nArg == 0 {\n\n\t\tstatus = altEthos.Exec(path)\n\n\t} else if nArg == 1 {\n\n\t\tstatus = altEthos.Exec(path, &args[0])\n\n\t} else if nArg == 2 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1])\n\n\t} else if nArg == 3 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2])\n\n\t} else if nArg == 4 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2], &args[3])\n\n\t}\n\n\treturn\n\n}", "func processLine(cmdLine string) {\n\tcmdLine = strings.TrimSpace(cmdLine)\n\n\tcommandList := make([]exec.Cmd, 0)\n\n\tif len(cmdLine) == 0 {\n\t\treturn\n\t}\n\n\tpipeStages := strings.Split(cmdLine, pipeChar)\n\n\terr := createPipeStages(&commandList, pipeStages)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = connectPipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error with pipes: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = executePipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error during execution: %v\\n\", shellName, err)\n\t\treturn\n\t}\n}", "func ExecuteCommandline(time time.Duration, command string, extraArgs []string) (string, error) {\n\t// Create a new context and add a timeout to it\n\tctx, cancel := 
context.WithTimeout(context.Background(), time)\n\tdefer cancel() // The cancel should be deferred so resources are cleaned up\n\n\t// Create the command with our context\n\targs := strings.Split(command, \" \")\n\tvar cmd *exec.Cmd\n\n\tif len(args) == 1 {\n\t\tcmd = exec.CommandContext(ctx, args[0], extraArgs...)\n\t} else {\n\t\tcmd = exec.CommandContext(ctx, args[0], append(args[1:], extraArgs...)...)\n\t}\n\n\tcmd.Wait()\n\t// This time we can simply use Output() to get the result.\n\tout, err := cmd.CombinedOutput()\n\n\t// We want to check the context error to see if the timeout was executed.\n\t// The error returned by cmd.Output() will be OS specific based on what\n\t// happens when a process is killed.\n\tif ctx.Err() == context.DeadlineExceeded {\n\t\treturn \"\", ctx.Err()\n\t}\n\n\t// If there's no context error, we know the command completed (or errored).\n\tif err != nil {\n\t\treturn string(out), err\n\t}\n\treturn string(out), nil\n}", "func ShellExec(cmdLine string, shells ...string) (string, error) {\n\t// shell := \"/bin/sh\"\n\tshell := \"sh\"\n\tif len(shells) > 0 {\n\t\tshell = shells[0]\n\t}\n\n\tvar out bytes.Buffer\n\tcmd := exec.Command(shell, \"-c\", cmdLine)\n\tcmd.Stdout = &out\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn out.String(), nil\n}", "func (c cli) exec(cmd string, args ...string) (string, error) {\n\tallArgs := append([]string{cmd}, args...)\n\treturn execCmd(\"lightning-cli\", allArgs...)\n}", "func Exec(cmd string) {\n\n\tfmt.Printf(\"Você digitou: %s \", cmd)\n\n}", "func (r RealExecute) ExecCommand(com string, args ...string) ([]byte, error) {\n\t/* #nosec */\n\tcommand := exec.Command(com, args...)\n\treturn command.CombinedOutput()\n}", "func (s pathRuntime) Exec(args []string) error {\n\truntimeArgs := []string{s.path}\n\tif len(args) > 1 {\n\t\truntimeArgs = append(runtimeArgs, args[1:]...)\n\t}\n\n\treturn s.execRuntime.Exec(runtimeArgs)\n}", "func Exec(t testing.TB, cmd *cobra.Command, stdIn io.Reader, args ...string) (string, string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tt.Cleanup(cancel)\n\n\treturn ExecCtx(ctx, cmd, stdIn, args...)\n}", "func executor(line string) {\n\tif line == \"quit\" {\n\t\tfmt.Printf(\"%s\", \"Adios!\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tos.Exit(0)\n\t}\n\n\tif line == \"help\" {\n\t\tfmt.Printf(\"Try typing something along the lines of:\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tfmt.Print(\" ⧐ current_date = $(date)\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tfmt.Print(\"A command should be triggered in your system. 
Then try printing the result of that command with:\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\tfmt.Printf(\" ⧐ current_date\")\n\t\tfmt.Printf(\"%s\", \"\\n\")\n\t\treturn\n\t}\n\n\tRun(line, true)\n}", "func ParseExecutableLine(name string, fullLine string) (Executable, error) {\n\tline := strings.Replace(fullLine, asdfPluginPrefix, \"\", -1)\n\ttokens := strings.Split(line, \" \")\n\tif len(tokens) != 2 {\n\t\treturn Executable{}, fmt.Errorf(\"bad line %s\", fullLine)\n\t}\n\treturn Executable{\n\t\tName: name,\n\t\tPluginName: strings.TrimSpace(tokens[0]),\n\t\tPluginVersion: strings.TrimSpace(tokens[1]),\n\t}, nil\n}", "func Exec(container string, cmdLine ...string) (string, error) {\n\tparts := []string{\"exec\", \"-t\", container}\n\tparts = append(parts, cmdLine...)\n\tcmd := exec.Command(\"docker\", parts...)\n\toutput, err := cmd.CombinedOutput()\n\treturn string(output), err\n}", "func execute(w io.Writer, commandline string, req io.Reader) error {\n\targv, err := cmd.SplitQuoted(commandline)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// We treat a pipe command specially.\n\t// It will be splitted by the pipe binary.\n\tif strings.HasPrefix(commandline, \"pipe \") {\n\t\targv = []string{\"pipe\", commandline[5:]}\n\t}\n\n\tif len(argv) < 1 {\n\t\treturn fmt.Errorf(\"request contains no command\")\n\t}\n\n\t// Get installation directory of editor binary.\n\t// All subcommands must be in the same directory.\n\tvar installDir string\n\tprogname := os.Args[0]\n\tif p, err := filepath.Abs(progname); err != nil {\n\t\treturn fmt.Errorf(\"cannot get editor directory\")\n\t} else {\n\t\tinstallDir = filepath.Dir(p)\n\t}\n\n\tvar buf bytes.Buffer\n\tvar errbuf bytes.Buffer\n\targv[0] = filepath.Join(installDir, argv[0])\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := exec.CommandContext(ctx, argv[0], argv[1:]...)\n\tc.Stdin = req\n\tc.Stdout = &buf\n\tc.Stderr = &errbuf\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\tpid := c.Process.Pid\n\tProcessList.Add(pid, argv, cancel)\n\n\terr = c.Wait()\n\tProcessList.Remove(pid)\n\tio.Copy(w, &buf)\n\n\t// Write stderr of commands to the console.\n\tif errbuf.Len() > 0 {\n\t\tif err != nil {\n\t\t\terrmsg, _ := ioutil.ReadAll(&errbuf)\n\t\t\terr = fmt.Errorf(\"%s\\n%s\\n\", err.Error(), string(errmsg))\n\t\t} else {\n\t\t\tio.Copy(os.Stdout, &errbuf)\n\t\t}\n\t}\n\treturn err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ShellExec executes a command line via the shell, e.g. "ls -al"
func ShellExec(cmdLine string, shells ...string) (string, error) { // shell := "/bin/sh" shell := "sh" if len(shells) > 0 { shell = shells[0] } var out bytes.Buffer cmd := exec.Command(shell, "-c", cmdLine) cmd.Stdout = &out if err := cmd.Run(); err != nil { return "", err } return out.String(), nil }
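ShellExec, by contrast, routes the line through `sh -c` (or a caller-supplied shell), so pipes, globs, and variable expansion work. A sketch under the same assumed import path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gookit/goutil/cliutil" // assumed import path for ShellExec
)

func main() {
	// Pipes need a real shell; ExecLine would pass "|" to ls as an argument.
	out, err := cliutil.ShellExec("ls -al | head -n 5")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out)

	// The variadic parameter selects a different shell binary.
	out, err = cliutil.ShellExec("echo $0", "bash")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out) // bash
}
```

Note that the implementation above wires up only `cmd.Stdout`; stderr is discarded, which is worth keeping in mind when debugging failing commands.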
[ "func exec(a *apl.Apl, L, R apl.Value) (apl.Value, error) {\n\tr := R\n\tvar in io.Reader\n\tif L != nil {\n\t\tr = L\n\t\tc, ok := R.(apl.Channel)\n\t\tif ok {\n\t\t\tin = bufio.NewReader(apl.NewChannelReader(a, c))\n\t\t} else {\n\t\t\tin = strings.NewReader(R.String(a))\n\t\t}\n\t}\n\n\tv, ok := domain.ToStringArray(nil).To(a, r)\n\tif ok == false {\n\t\treturn nil, fmt.Errorf(\"io exec: argv must be strings: %T\", r)\n\t}\n\targv := v.(apl.StringArray).Strings\n\tif len(argv) == 0 {\n\t\treturn nil, fmt.Errorf(\"io exec: argv empty\")\n\t}\n\n\t// If the command starts with a slash, we may relocate it.\n\tif strings.HasPrefix(argv[0], \"/\") {\n\t\tfsys, mpt, err := lookup(argv[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif f, ok := fsys.(fs); ok == false {\n\t\t\treturn nil, fmt.Errorf(\"exec: %s: file system is not an os fs: %s\", argv[0], fsys.String())\n\t\t} else {\n\t\t\trelpath := strings.TrimPrefix(argv[0], mpt)\n\t\t\targv[0] = f.path(relpath)\n\t\t}\n\t}\n\n\tcmd := ex.Command(argv[0], argv[1:]...)\n\tcmd.Stdin = in\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := apl.LineReader(out)\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}", "func ExecLine(cmdLine string, workDir ...string) (string, error) {\n\tp := cmdline.NewParser(cmdLine)\n\n\t// create a new Cmd instance\n\tcmd := p.NewExecCmd()\n\tif len(workDir) > 0 {\n\t\tcmd.Dir = workDir[0]\n\t}\n\n\tbs, err := cmd.Output()\n\treturn string(bs), err\n}", "func RunShell(opt *Opt, shell []byte) (stdout,stderr io.Reader, err error){\n\tshellScript := string(shell)\n\tif err != nil{\n\t\treturn\n\t}\n\treturn Spwan(opt,\"echo\",[]string{shellScript,\"|\",\"bash\",\"-\"},nil)\n}", "func ExecuteShell(ctx context.Context, name string, args ...string) error {\n\treturn DefaultCmd.ExecuteShell(ctx, name, args...)\n}", "func cmdForShell(cmdStr string) (string, error) {\n\tcmd := exec.Command(\"sh\", \"-c\", cmdStr)\n\tvar stdout, stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\n\toutStr, _ := string(stdout.Bytes()), string(stderr.Bytes())\n\tif err != nil {\n\t\treturn outStr, err\n\t}\n\n\treturn outStr, nil\n}", "func Shell(cmd string) ([]byte, error) {\n\tcmdParts := strings.SplitN(cmd, \" \", 2)\n\tcmdName := cmdParts[0]\n\targs := cmdParts[1]\n\tc := exec.Command(cmdName, args)\n\terr := c.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.Output()\n}", "func shellCommand(name string, args ...string) ShellCmd {\n\tcmd := exec.Command(name, args...)\n\toutpipe, _ := cmd.StdoutPipe()\n\terrpipe, _ := cmd.StderrPipe()\n\toutreader := bufio.NewReader(outpipe)\n\terrreader := bufio.NewReader(errpipe)\n\n\treturn ShellCmd{cmd, outreader, errreader, nil}\n}", "func WrapExec(cmd string, args []String, nArg uint32) (status syscall.Status){\n\n\n\tpath := \"/programs/\"+cmd\n\n\tif nArg == 0 {\n\n\t\tstatus = altEthos.Exec(path)\n\n\t} else if nArg == 1 {\n\n\t\tstatus = altEthos.Exec(path, &args[0])\n\n\t} else if nArg == 2 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1])\n\n\t} else if nArg == 3 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2])\n\n\t} else if nArg == 4 {\n\n\t\tstatus = altEthos.Exec(path, &args[0], &args[1], &args[2], &args[3])\n\n\t}\n\n\treturn\n\n}", "func Shell(shellStdin string) (stdout, stderr string, err error) {\n\treturn Exec(\"sh\", \"\", shellStdin)\n}", "func processLine(cmdLine string) {\n\tcmdLine = 
strings.TrimSpace(cmdLine)\n\n\tcommandList := make([]exec.Cmd, 0)\n\n\tif len(cmdLine) == 0 {\n\t\treturn\n\t}\n\n\tpipeStages := strings.Split(cmdLine, pipeChar)\n\n\terr := createPipeStages(&commandList, pipeStages)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = connectPipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error with pipes: %v.\\n\", shellName, err)\n\t\treturn\n\t}\n\n\terr = executePipeline(commandList)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v: Error during execution: %v\\n\", shellName, err)\n\t\treturn\n\t}\n}", "func runShellCommand(args string) error {\n\tvar out bytes.Buffer //save command output\n\tvar stderr bytes.Buffer //save command errors\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", args)\n\tcmd.Stdout = &out\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\n\treturn err\n}", "func Shell(t *testing.T, name string, arg ...string) {\n\tt.Helper()\n\n\tbin, err := exec.LookPath(name)\n\tif err != nil {\n\t\tt.Skipf(\"skipping, binary %q not found: %v\", name, err)\n\t}\n\n\tt.Logf(\"$ %s %v\", bin, arg)\n\n\tcmd := exec.Command(bin, arg...)\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"failed to start command %q: %v\", name, err)\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\t// Shell operations in these tests require elevated privileges.\n\t\tif cmd.ProcessState.ExitCode() == 1 /* unix.EPERM */ {\n\t\t\tt.Skipf(\"skipping, permission denied: %v\", err)\n\t\t}\n\n\t\tt.Fatalf(\"failed to wait for command %q: %v\", name, err)\n\t}\n}", "func execShCmd(strCmd string) ([]byte, error) {\n\tlog.Debug(\"Executing %+v\", strCmd)\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", strCmd)\n\n\tstdoutpipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Error(\"Error stdout: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\tstderrpipe, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Error(\"Error stderr: %s. for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Error(\"Error: %s. 
for command: %s\", err, strCmd)\n\t\treturn nil, err\n\t}\n\tstdout, errstderr := ioutil.ReadAll(stdoutpipe)\n\tstderr, errstdout := ioutil.ReadAll(stderrpipe)\n\n\tcmderr := cmd.Wait()\n\n\tif errstderr != nil {\n\t\tlog.Debug(\"Stdout err: %v\", errstderr)\n\t}\n\tif errstdout != nil {\n\t\tlog.Debug(\"Stderr err: %v\", errstdout)\n\t}\n\tlog.Debug(\"Stdout is: '%s'\\n\", stdout)\n\tlog.Debug(\"Stderr is: '%s'\\n\", stderr)\n\tif cmderr != nil {\n\t\tlog.Error(\"cmderr: %v, %v\", cmderr, string(stderr))\n\t}\n\treturn stdout, cmderr\n}", "func (c cli) exec(cmd string, args ...string) (string, error) {\n\tallArgs := append([]string{cmd}, args...)\n\treturn execCmd(\"lightning-cli\", allArgs...)\n}", "func execInSystem(execPath string, params []string, logsBuffer *bytes.Buffer, print bool) error {\n\tvar lock sync.Mutex\n\tvar c string\n\tvar cmdName string\n\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tc = \"-c\"\n\t\tcmdName = \"sh\"\n\tcase \"windows\":\n\t\tc = \"/c\"\n\t\tcmdName = \"cmd\"\n\tdefault:\n\t\tlog.Panicf(\"System type error, got <%s>, but expect linux/windowns!\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(cmdName, append(params, c)...)\n\tcmd.Dir = execPath\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// print log\n\toutReader := bufio.NewReader(stdout)\n\terrReader := bufio.NewReader(stderr)\n\tprintLog := func(reader *bufio.Reader, typex string) {\n\t\tfor {\n\t\t\tline, err := reader.ReadString('\\n')\n\t\t\tif print {\n\t\t\t\tlog.Printf(\"%s: %s\", typex, line)\n\t\t\t}\n\t\t\tif logsBuffer != nil {\n\t\t\t\tlock.Lock()\n\t\t\t\tlogsBuffer.WriteString(line)\n\t\t\t\tlock.Unlock()\n\t\t\t}\n\t\t\tif err != nil || err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tprintLog(outReader, \"Stdout\")\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tprintLog(errReader, \"Stderr\")\n\t}()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twg.Wait()\n\treturn cmd.Wait()\n}", "func (e *Executor) ExecWithTimeoutShell(target Target, dir string, env []string, timeout time.Duration, showOutput, foreground bool, sandbox SandboxConfig, cmd string) ([]byte, []byte, error) {\n\treturn e.ExecWithTimeoutShellStdStreams(target, dir, env, timeout, showOutput, foreground, sandbox, cmd, false)\n}", "func (c *Cluster) RunShell(cmd string, values interface{}, extraEnv []string) error {\n\terr := c.writeValuesYaml(values)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Collect environment variables\n\tenv := append(os.Environ(), MapToEnv(values, \"VALUE_\")...)\n\tenv = append(env, extraEnv...)\n\tenv = append(env, \"HOME=\"+c.Path)\n\n\topt := exe.Opt{\n\t\tDir: c.Path,\n\t\tEnv: env,\n\t}\n\n\t_, _, err = exe.Run(\"bash\", exe.Args{\"-c\", cmd}, opt, c.log)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func shellExecutor(rootCmd *cobra.Command, printer *Printer, meta *meta) func(s string) {\n\treturn func(s string) {\n\t\targs := strings.Fields(s)\n\n\t\tsentry.AddCommandContext(strings.Join(removeOptions(args), \" \"))\n\n\t\trootCmd.SetArgs(meta.CliConfig.Alias.ResolveAliases(args))\n\n\t\terr := rootCmd.Execute()\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*interactive.InterruptError); ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tprintErr := printer.Print(err, nil)\n\t\t\tif printErr != nil {\n\t\t\t\t_, _ = fmt.Fprintln(os.Stderr, 
err)\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\n\t\t// command is nil if it does not have a Run function\n\t\t// ex: instance -h\n\t\tif meta.command == nil {\n\t\t\treturn\n\t\t}\n\n\t\tautoCompleteCache.Update(meta.command.Namespace)\n\n\t\tprintErr := printer.Print(meta.result, meta.command.getHumanMarshalerOpt())\n\t\tif printErr != nil {\n\t\t\t_, _ = fmt.Fprintln(os.Stderr, printErr)\n\t\t}\n\t}\n}", "func RunShellCmd(rb RunningBuild, step *vts.BuildStep, o, e io.Writer) error {\n\tif len(step.Args) > 0 {\n\t\tstep.Args[0] = \"set +h;umask 022;\" + step.Args[0]\n\t}\n\t_, err := rb.ExecBlocking(\"/tmp\", append([]string{\"/bin/bash\", \"-c\"}, step.Args...), o, e)\n\treturn err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ThreeWay attempts a three-way merge between two candidates and a common ancestor. It considers the three of them recursively, applying some simple rules to identify conflicts:
 - If any of the three nodes are different NomsKinds: conflict
 - If we are dealing with a map:
   - If the same key is both removed and inserted wrt parent: conflict
   - If the same key is inserted wrt parent, but with different values: conflict
 - If we are dealing with a struct:
   - If the same field is both removed and inserted wrt parent: conflict
   - If the same field is inserted wrt parent, but with different values: conflict
 - If we are dealing with a list:
   - If the same index is both removed and inserted wrt parent: conflict
   - If the same index is inserted wrt parent, but with different values: conflict
 - If we are dealing with a set:
   - If the same object is both removed and inserted wrt parent: conflict
All other modifications are allowed. Currently, ThreeWay() only works on types.Map.
func ThreeWay(a, b, parent types.Value, vwr types.ValueReadWriter) (merged types.Value, err error) { if a == nil && b == nil { return parent, nil } else if a == nil { return parent, newMergeConflict("Cannot merge nil Value with %s.", b.Type().Describe()) } else if b == nil { return parent, newMergeConflict("Cannot merge %s with nil value.", a.Type().Describe()) } else if unmergeable(a, b) { return parent, newMergeConflict("Cannot merge %s with %s.", a.Type().Describe(), b.Type().Describe()) } return threeWayMerge(a, b, parent, vwr) }
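The call pattern for ThreeWay is simple even though constructing noms values is not. The sketch below assumes the function lives in a merge package alongside the types import from its signature, and leaves value construction to the caller; both the package name and the import path are assumptions.

```go
package merge // assumed package; ThreeWay above is in scope

import (
	"github.com/attic-labs/noms/go/types" // assumed import path for the types package in the signature
)

// MergeOrReport wraps ThreeWay: on success it returns the merged value; on a
// conflict it returns the common ancestor unchanged together with the merge
// error (e.g. "Cannot merge X with Y."), so the caller can fall back to
// manual resolution.
func MergeOrReport(a, b, parent types.Value, vrw types.ValueReadWriter) (types.Value, error) {
	merged, err := ThreeWay(a, b, parent, vrw)
	if err != nil {
		// err describes which values could not be reconciled.
		return parent, err
	}
	return merged, nil
}
```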
[ "func ForeignKeysMerge(ctx context.Context, mergedRoot, ourRoot, theirRoot, ancRoot *doltdb.RootValue) (*doltdb.ForeignKeyCollection, []FKConflict, error) {\n\tours, err := ourRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttheirs, err := theirRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tanc, err := ancRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tancSchs, err := ancRoot.GetAllSchemas(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcommon, conflicts, err := foreignKeysInCommon(ours, theirs, anc, ancSchs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tourNewFKs, err := fkCollSetDifference(ours, anc, ancSchs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttheirNewFKs, err := fkCollSetDifference(theirs, anc, ancSchs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// check for conflicts between foreign keys added on each branch since the ancestor\n\t//TODO: figure out the best way to handle unresolved foreign keys here if one branch added an unresolved one and\n\t// another branch added the same one but resolved\n\t_ = ourNewFKs.Iter(func(ourFK doltdb.ForeignKey) (stop bool, err error) {\n\t\ttheirFK, ok := theirNewFKs.GetByTags(ourFK.TableColumns, ourFK.ReferencedTableColumns)\n\t\tif ok && !ourFK.DeepEquals(theirFK) {\n\t\t\t// Foreign Keys are defined over the same tags,\n\t\t\t// but are not exactly equal\n\t\t\tconflicts = append(conflicts, FKConflict{\n\t\t\t\tKind: TagCollision,\n\t\t\t\tOurs: ourFK,\n\t\t\t\tTheirs: theirFK,\n\t\t\t})\n\t\t}\n\n\t\ttheirFK, ok = theirNewFKs.GetByNameCaseInsensitive(ourFK.Name)\n\t\tif ok && !ourFK.EqualDefs(theirFK) {\n\t\t\t// Two different Foreign Keys have the same name\n\t\t\tconflicts = append(conflicts, FKConflict{\n\t\t\t\tKind: NameCollision,\n\t\t\t\tOurs: ourFK,\n\t\t\t\tTheirs: theirFK,\n\t\t\t})\n\t\t}\n\t\treturn false, err\n\t})\n\n\terr = ourNewFKs.Iter(func(ourFK doltdb.ForeignKey) (stop bool, err error) {\n\t\t// The common set of FKs may already have this FK, if it was added on both branches\n\t\tif commonFK, ok := common.GetByNameCaseInsensitive(ourFK.Name); ok && commonFK.EqualDefs(ourFK) {\n\t\t\t// Skip this one if it's identical to the one in the common set\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, common.AddKeys(ourFK)\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\terr = theirNewFKs.Iter(func(theirFK doltdb.ForeignKey) (stop bool, err error) {\n\t\t// The common set of FKs may already have this FK, if it was added on both branches\n\t\tif commonFK, ok := common.GetByNameCaseInsensitive(theirFK.Name); ok && commonFK.EqualDefs(theirFK) {\n\t\t\t// Skip this one if it's identical to the one in the common set\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, common.AddKeys(theirFK)\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcommon, err = pruneInvalidForeignKeys(ctx, common, mergedRoot)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn common, conflicts, err\n}", "func ForeignKeysMerge(ctx context.Context, mergedRoot, ourRoot, theirRoot, ancRoot *doltdb.RootValue) (*doltdb.ForeignKeyCollection, []FKConflict, error) {\n\tours, err := ourRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttheirs, err := theirRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tanc, err := ancRoot.GetForeignKeyCollection(ctx)\n\tif err != nil {\n\t\treturn nil, 
nil, err\n\t}\n\n\tancSchs, err := ancRoot.GetAllSchemas(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcommon, conflicts, err := foreignKeysInCommon(ours, theirs, anc)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tourNewFKs, err := fkCollSetDifference(ours, anc, ancSchs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttheirNewFKs, err := fkCollSetDifference(theirs, anc, ancSchs)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// check for conflicts between foreign keys added on each branch since the ancestor\n\t//TODO: figure out the best way to handle unresolved foreign keys here if one branch added an unresolved one and\n\t// another branch added the same one but resolved\n\t_ = ourNewFKs.Iter(func(ourFK doltdb.ForeignKey) (stop bool, err error) {\n\t\ttheirFK, ok := theirNewFKs.GetByTags(ourFK.TableColumns, ourFK.ReferencedTableColumns)\n\t\tif ok && !ourFK.DeepEquals(theirFK) {\n\t\t\t// Foreign Keys are defined over the same tags,\n\t\t\t// but are not exactly equal\n\t\t\tconflicts = append(conflicts, FKConflict{\n\t\t\t\tKind: TagCollision,\n\t\t\t\tOurs: ourFK,\n\t\t\t\tTheirs: theirFK,\n\t\t\t})\n\t\t}\n\n\t\ttheirFK, ok = theirNewFKs.GetByNameCaseInsensitive(ourFK.Name)\n\t\tif ok && !ourFK.EqualDefs(theirFK) {\n\t\t\t// Two different Foreign Keys have the same name\n\t\t\tconflicts = append(conflicts, FKConflict{\n\t\t\t\tKind: NameCollision,\n\t\t\t\tOurs: ourFK,\n\t\t\t\tTheirs: theirFK,\n\t\t\t})\n\t\t}\n\t\treturn false, err\n\t})\n\n\terr = ourNewFKs.Iter(func(ourFK doltdb.ForeignKey) (stop bool, err error) {\n\t\treturn false, common.AddKeys(ourFK)\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\terr = theirNewFKs.Iter(func(theirFK doltdb.ForeignKey) (stop bool, err error) {\n\t\treturn false, common.AddKeys(theirFK)\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcommon, err = pruneInvalidForeignKeys(ctx, common, mergedRoot)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn common, conflicts, err\n}", "func ThreeWayDiff(orig, config, live *unstructured.Unstructured) (*DiffResult, error) {\n\torig = removeNamespaceAnnotation(orig)\n\tconfig = removeNamespaceAnnotation(config)\n\n\t// 1. calculate a 3-way merge patch\n\tpatchBytes, newVersionedObject, err := threeWayMergePatch(orig, config, live)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// 2. 
get expected live object by applying the patch against the live object\n\tliveBytes, err := json.Marshal(live)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar predictedLiveBytes []byte\n\t// If orig/config/live represents a registered scheme...\n\tif newVersionedObject != nil {\n\t\t// Apply patch while applying scheme defaults\n\t\tliveBytes, predictedLiveBytes, err = applyPatch(liveBytes, patchBytes, newVersionedObject)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// Otherwise, merge patch directly as JSON\n\t\tpredictedLiveBytes, err = jsonpatch.MergePatch(liveBytes, patchBytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tpredictedLive := &unstructured.Unstructured{}\n\terr = json.Unmarshal(predictedLiveBytes, predictedLive)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buildDiffResult(predictedLiveBytes, liveBytes), nil\n}", "func (s *mergeBaseSuite) TestDoubleCommonAncestorInCrossMerge(c *C) {\n\trevs := []string{\"C\", \"D\"}\n\texpectedRevs := []string{\"CD1\", \"CD2\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func createThreeWayMergePatch(found, child *unstructured.Unstructured) ([]byte, error) {\n\toriginal, err := getLastAppliedObject(found)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to get last applied object: %v\", err)\n\t}\n\tfoundJSON, childJSON, originalJSON, err := getJSON(found, child, original)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting JSON: %v\", err)\n\t}\n\n\tpatch, err := createThreeWayJSONMergePatch(originalJSON, childJSON, foundJSON)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create three way merge patch: %v\", err)\n\t}\n\treturn patch, nil\n}", "func checksInCommon(ourChks, theirChks, ancChks []schema.Check) ([]schema.Check, []ChkConflict) {\n\t// Make map of their checks for fast lookup\n\ttheirChkMap := make(map[string]schema.Check)\n\tfor _, chk := range theirChks {\n\t\ttheirChkMap[chk.Name()] = chk\n\t}\n\n\t// Make map of ancestor checks for fast lookup\n\tancChkMap := make(map[string]schema.Check)\n\tfor _, chk := range ancChks {\n\t\tancChkMap[chk.Name()] = chk\n\t}\n\n\t// Iterate over our checks\n\tvar common []schema.Check\n\tvar conflicts []ChkConflict\n\tfor _, ourChk := range ourChks {\n\t\t// See if ours and theirs both have a CHECK by this name\n\t\ttheirChk, ok := theirChkMap[ourChk.Name()]\n\t\t// Ours and theirs do have this CHECK in common, will be dealt with elsewhere\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// NO CONFLICT: our and their check are defined exactly the same\n\t\tif ourChk == theirChk {\n\t\t\tcommon = append(common, ourChk)\n\t\t\tcontinue\n\t\t}\n\n\t\t// See if ancestor also has this check\n\t\tancChk, ok := ancChkMap[ourChk.Name()]\n\t\t// CONFLICT: our and their CHECK have the same name, but different definitions\n\t\tif !ok {\n\t\t\tconflicts = append(conflicts, ChkConflict{\n\t\t\t\tKind: NameCollision,\n\t\t\t\tOurs: ourChk,\n\t\t\t\tTheirs: theirChk,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\n\t\t// NO CONFLICT: CHECK was only modified in our branch, so update check definition with ours\n\t\tif ancChk == theirChk {\n\t\t\tcommon = append(common, ourChk)\n\t\t\tcontinue\n\t\t}\n\n\t\t// NO CONFLICT: CHECK was only modified in their branch, so update check definition with theirs\n\t\tif ancChk == ourChk {\n\t\t\tcommon = append(common, ourChk)\n\t\t\tcontinue\n\t\t}\n\n\t\t// CONFLICT: CHECK was modified on both\n\t\tconflicts = append(conflicts, ChkConflict{\n\t\t\tKind: NameCollision,\n\t\t\tOurs: 
ourChk,\n\t\t\tTheirs: theirChk,\n\t\t})\n\t}\n\n\treturn common, conflicts\n}", "func FindCommonAncestorOfTwoNodes(tree *Node, a *Node, b *Node) *Node {\n\n}", "func ReadTreeMerge(c *Client, opt ReadTreeOptions, stage1, stage2, stage3 Treeish) (*Index, error) {\n\tidx, err := c.GitDir.ReadIndex()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\torigMap := idx.GetMap()\n\n\tbase, err := GetIndexMap(c, stage1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tours, err := GetIndexMap(c, stage2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttheirs, err := GetIndexMap(c, stage3)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a fake map which contains all objects in base, ours, or theirs\n\tallObjects := make(map[IndexPath]bool)\n\tfor path, _ := range base {\n\t\tallObjects[path] = true\n\t}\n\tfor path, _ := range ours {\n\t\tallObjects[path] = true\n\t}\n\tfor path, _ := range theirs {\n\t\tallObjects[path] = true\n\t}\n\n\tfor path, _ := range allObjects {\n\n\t\t// All three trees are the same, don't do anything to the index.\n\t\tif samePath(base, ours, path) && samePath(base, theirs, path) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If both stage2 and stage3 are the same, the work has been done in\n\t\t// both branches, so collapse to stage0 (use our changes)\n\t\tif samePath(ours, theirs, path) {\n\t\t\tidx.AddStage(c, path, ours[path].Sha1, Stage0, ours[path].Mtime, ours[path].Mtimenano, ours[path].Fsize)\n\t\t\tcontinue\n\t\t}\n\n\t\t// If stage1 and stage2 are the same, our branch didn't do anything,\n\t\t// but theirs did, so take their changes.\n\t\tif samePath(base, ours, path) {\n\t\t\tidx.AddStage(c, path, theirs[path].Sha1, Stage0, theirs[path].Mtime, theirs[path].Mtimenano, theirs[path].Fsize)\n\t\t\tcontinue\n\t\t}\n\n\t\t// If stage1 and stage3 are the same, we did something but they didn't,\n\t\t// so take our changes\n\t\tif samePath(base, theirs, path) {\n\t\t\tif o, ok := ours[path]; ok {\n\t\t\t\tidx.AddStage(c, path, o.Sha1, Stage0, o.Mtime, o.Mtimenano, o.Fsize)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// We couldn't short-circuit out, so add all three stages.\n\n\t\t// Remove Stage0 if it exists. 
If it doesn't, then at worst we'll\n\t\t// remove a stage that we're about to add back.\n\t\tidx.RemoveFile(path)\n\n\t\tif b, ok := base[path]; ok {\n\t\t\tidx.AddStage(c, path, b.Sha1, Stage1, b.Mtime, b.Mtimenano, b.Fsize)\n\t\t}\n\t\tif o, ok := ours[path]; ok {\n\t\t\tidx.AddStage(c, path, o.Sha1, Stage2, o.Mtime, o.Mtimenano, o.Fsize)\n\t\t}\n\t\tif t, ok := theirs[path]; ok {\n\t\t\tidx.AddStage(c, path, t.Sha1, Stage3, t.Mtime, t.Mtimenano, t.Fsize)\n\t\t}\n\t}\n\tif err := checkMergeAndUpdate(c, opt, origMap, idx); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn idx, readtreeSaveIndex(c, opt, idx)\n}", "func ReadTreeThreeWay(c *Client, opt ReadTreeOptions, stage1, stage2, stage3 Treeish) (*Index, error) {\n\tidx, err := c.GitDir.ReadIndex()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresetremovals, err := checkReadtreePrereqs(c, opt, idx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\torigMap := idx.GetMap()\n\n\tbase, err := GetIndexMap(c, stage1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tours, err := GetIndexMap(c, stage2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttheirs, err := GetIndexMap(c, stage3)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a slice which contins all objects in base, ours, or theirs\n\tvar allPaths []*IndexEntry\n\tfor path, _ := range base {\n\t\tallPaths = append(allPaths, &IndexEntry{PathName: path})\n\t}\n\tfor path, _ := range ours {\n\t\tallPaths = append(allPaths, &IndexEntry{PathName: path})\n\t}\n\tfor path, _ := range theirs {\n\t\tallPaths = append(allPaths, &IndexEntry{PathName: path})\n\t}\n\t// Sort to ensure directories come before files.\n\tsort.Sort(ByPath(allPaths))\n\n\t// Remove duplicates and exclude files that aren't part of the\n\t// sparse checkout rules if applicable.\n\tvar allObjects []IndexPath\n\tfor i := range allPaths {\n\t\tif i > 0 && allPaths[i].PathName == allPaths[i-1].PathName {\n\t\t\tcontinue\n\t\t}\n\t\tallObjects = append(allObjects, allPaths[i].PathName)\n\t}\n\tvar dirs []IndexPath\n\n\t// Checking for merge conflict with index. If this seems like a confusing mess, it's mostly\n\t// because it was written to pass the t1000-read-tree-m-3way test case from the official git\n\t// test suite.\n\t//\n\t// The logic can probably be cleaned up.\n\tfor path, orig := range origMap {\n\t\to, ok := ours[path]\n\t\tif !ok {\n\t\t\t// If it's been added to the index in the same state as Stage 3, and it's not in\n\t\t\t// stage 1 or 2 it's fine.\n\t\t\tif !base.Contains(path) && !ours.Contains(path) && samePath(origMap, theirs, path) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. 
Cannot merge.\", path)\n\t\t}\n\n\t\t// Variable names mirror the O/A/B from the test suite, with \"c\" for contains\n\t\toc := base.Contains(path)\n\t\tac := ours.Contains(path)\n\t\tbc := theirs.Contains(path)\n\n\t\tif oc && ac && bc {\n\t\t\toa := samePath(base, ours, path)\n\t\t\tob := samePath(base, theirs, path)\n\n\t\t\t// t1000-read-tree-m-3way.sh test 75 \"must match A in O && A && B && O!=A && O==B case.\n\t\t\t// (This means we can't error out if the Sha1s dont match.)\n\t\t\tif !oa && ob {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif oa && !ob {\n\t\t\t\t// Relevent cases:\n\t\t\t\t// Must match and be up-to-date in O && A && B && O==A && O!=B\n\t\t\t\t// May match B in O && A && B && O==A && O!=B\n\t\t\t\tb, ok := theirs[path]\n\t\t\t\tif ok && b.Sha1 == orig.Sha1 {\n\t\t\t\t\tcontinue\n\t\t\t\t} else if !path.IsClean(c, o.Sha1) {\n\t\t\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. Cannot merge.\", path)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Must match and be up-to-date in !O && A && B && A != B case test from AND\n\t\t// Must match and be up-to-date in O && A && B && A != B case test from\n\t\t// t1000-read-tree-m-3way.sh in official git\n\t\tif ac && bc && !samePath(ours, theirs, path) {\n\t\t\tif !path.IsClean(c, o.Sha1) {\n\t\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. Cannot merge.\", path)\n\t\t\t}\n\t\t}\n\n\t\t// Must match and be up-to-date in O && A && !B && !B && O != A case AND\n\t\t// Must match and be up-to-date in O && A && !B && !B && O == A case from\n\t\t// t1000-read-tree-m-3way.sh in official git\n\t\tif oc && ac && !bc {\n\t\t\tif !path.IsClean(c, o.Sha1) {\n\t\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. Cannot merge.\", path)\n\t\t\t}\n\t\t}\n\n\t\tif o.Sha1 != orig.Sha1 {\n\t\t\treturn idx, fmt.Errorf(\"Entry '%v' would be overwritten by a merge. 
Cannot merge.\", path)\n\t\t}\n\t}\n\tidx = NewIndex()\npaths:\n\tfor _, path := range allObjects {\n\t\t// Handle directory/file conflicts.\n\t\tif base.HasDir(path) || ours.HasDir(path) || theirs.HasDir(path) {\n\t\t\tif !opt.Merge && !opt.Reset {\n\t\t\t\t// If not merging, the file wins.\n\t\t\t\t// see http://www.stackoverflow.com/questions/52175720/how-does-git-read-tree-work-without-m-or-reset-option\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Keep track of what was a directory so that other\n\t\t\t// other paths know if they had a conflict higher\n\t\t\t// up in the tree.\n\t\t\tdirs = append(dirs, path)\n\n\t\t\t// Add the non-directory version fo the appropriate stage\n\t\t\tif p, ok := base[path]; ok {\n\t\t\t\tidx.AddStage(c, path, p.Mode, p.Sha1, Stage1, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t\t}\n\t\t\tif p, ok := ours[path]; ok {\n\t\t\t\tidx.AddStage(c, path, p.Mode, p.Sha1, Stage2, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t\t}\n\t\t\tif p, ok := theirs[path]; ok {\n\t\t\t\tidx.AddStage(c, path, p.Mode, p.Sha1, Stage3, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Handle the subfiles in any directory that had a conflict\n\t\t// by just adding them in the appropriate stage, because\n\t\t// there's no way for a directory and file to not be in\n\t\t// conflict.\n\t\tfor _, d := range dirs {\n\t\t\tif strings.HasPrefix(string(path), string(d+\"/\")) {\n\t\t\t\tif p, ok := base[path]; ok {\n\t\t\t\t\tif err := idx.AddStage(c, path, p.Mode, p.Sha1, Stage1, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif p, ok := ours[path]; ok {\n\t\t\t\t\tif err := idx.AddStage(c, path, p.Mode, p.Sha1, Stage2, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true, Replace: true}); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif p, ok := theirs[path]; ok {\n\t\t\t\t\tif err := idx.AddStage(c, path, p.Mode, p.Sha1, Stage3, p.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue paths\n\t\t\t}\n\t\t}\n\n\t\t// From here on out, we assume everything is a file.\n\n\t\t// All three trees are the same, don't do anything to the index.\n\t\tif samePath(base, ours, path) && samePath(base, theirs, path) {\n\t\t\tif err := idx.AddStage(c, path, ours[path].Mode, ours[path].Sha1, Stage0, ours[path].Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// If both stage2 and stage3 are the same, the work has been done in\n\t\t// both branches, so collapse to stage0 (use our changes)\n\t\tif samePath(ours, theirs, path) {\n\t\t\tif ours.Contains(path) {\n\t\t\t\tif err := idx.AddStage(c, path, ours[path].Mode, ours[path].Sha1, Stage0, ours[path].Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// If stage1 and stage2 are the same, our branch didn't do anything,\n\t\t// but theirs did, so take their changes.\n\t\tif samePath(base, ours, path) {\n\t\t\tif theirs.Contains(path) {\n\t\t\t\tif err := idx.AddStage(c, path, theirs[path].Mode, theirs[path].Sha1, Stage0, theirs[path].Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil 
{\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// If stage1 and stage3 are the same, we did something\n\t\t// but they didn't, so take our changes\n\t\tif samePath(base, theirs, path) {\n\t\t\tif ours.Contains(path) {\n\t\t\t\to := ours[path]\n\t\t\t\tif err := idx.AddStage(c, path, o.Mode, o.Sha1, Stage0, o.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// We couldn't short-circuit out, so add all three stages.\n\n\t\t// Remove Stage0 if it exists. If it doesn't, then at worst we'll\n\t\t// remove a stage that we're about to add back.\n\t\tidx.RemoveFile(path)\n\n\t\tif b, ok := base[path]; ok {\n\t\t\tidx.AddStage(c, path, b.Mode, b.Sha1, Stage1, b.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t}\n\t\tif o, ok := ours[path]; ok {\n\t\t\tidx.AddStage(c, path, o.Mode, o.Sha1, Stage2, o.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t}\n\t\tif t, ok := theirs[path]; ok {\n\t\t\tidx.AddStage(c, path, t.Mode, t.Sha1, Stage3, t.Fsize, time.Now().UnixNano(), UpdateIndexOptions{Add: true})\n\t\t}\n\t}\n\n\tif err := checkMergeAndUpdate(c, opt, origMap, idx, resetremovals); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn idx, readtreeSaveIndex(c, opt, idx)\n}", "func TestResolveConflictsWithMultipleLevelDeps(t *testing.T) {\n\tr := commonTestlibExampleReplica()\n\tr.InstanceMatrix[0][6] = NewInstance(r, 0, 6)\n\tr.InstanceMatrix[0][6].status = committed\n\tr.InstanceMatrix[0][6].deps = message.Dependencies{4, 5, 6, 7, 8}\n\n\t// create 1st level deps (4, 5, 6, 7, 8)\n\tfor i := range r.InstanceMatrix {\n\t\tr.InstanceMatrix[i][i+4] = NewInstance(r, uint8(i), uint64(i+4))\n\t\tr.InstanceMatrix[i][i+4].status = committed\n\t}\n\tr.InstanceMatrix[0][4].deps = message.Dependencies{2, 0, 0, 0, 0}\n\tr.InstanceMatrix[1][5].deps = message.Dependencies{2, 3, 0, 0, 0}\n\tr.InstanceMatrix[2][6].deps = message.Dependencies{2, 3, 4, 0, 0}\n\tr.InstanceMatrix[3][7].deps = message.Dependencies{2, 3, 4, 5, 0}\n\tr.InstanceMatrix[4][8].deps = message.Dependencies{2, 3, 4, 5, 6}\n\n\t// create 2nd level deps (2, 3, 4, 5, 6)\n\tfor i := range r.InstanceMatrix {\n\t\tr.InstanceMatrix[i][i+2] = NewInstance(r, uint8(i), uint64(i+2))\n\t\tr.InstanceMatrix[i][i+2].status = committed\n\t}\n\n\tassert.True(t, r.resolveConflicts(r.InstanceMatrix[0][6]))\n\n\t// test result list\n\tsccResultInstances := make([]*Instance, 0)\n\tfor _, instances := range r.sccResults {\n\t\tsccResultInstances = append(sccResultInstances, instances...)\n\t}\n\tassert.Equal(t, len(sccResultInstances), 11)\n\tj := 0\n\tfor i := range r.InstanceMatrix {\n\t\tassert.Equal(t, sccResultInstances[j], r.InstanceMatrix[i][i+2])\n\t\tj++\n\t\tassert.Equal(t, sccResultInstances[j], r.InstanceMatrix[i][i+4])\n\t\tj++\n\t}\n\tassert.Equal(t, sccResultInstances[j], r.InstanceMatrix[0][6])\n}", "func (s *mergeBaseSuite) TestCommonAncestorInMergedOrphans(c *C) {\n\trevs := []string{\"A\", \"B\"}\n\texpectedRevs := []string{\"AB\"}\n\ts.AssertMergeBase(c, revs, expectedRevs)\n}", "func Merge(a, b []Tx, resolved []Conflict) (c []Tx, conflicts []Conflict) {\n\tfor _, r := range resolved {\n\t\tif r.resolution == resolveNone {\n\t\t\treturn nil, resolved\n\t\t}\n\t}\n\n\tlena := len(a)\n\tlenb := len(b)\n\n\tif lena == lenb &&\n\t\ta[0].Time == b[0].Time && a[lena-1].Time == b[lenb-1].Time {\n\t\t// These are the same list of events\n\t\t// There can be no possible fork that has happened if 
they\n\t\t// 1. Are not of differing length\n\t\t// 2. Start at the unique ID\n\t\t// 3. End at the unique ID\n\t\treturn a, nil\n\t}\n\n\tmost := lena\n\tif lenb > most {\n\t\tmost = lenb\n\t}\n\n\tc = make([]Tx, 0, most)\n\tdeleted := make(map[string]int)\n\n\t// CheckConflict checks the last thing that was appended to c to see\n\t// if there's a conflict with having added that event.\n\t// It creates a single conflict per uuid/delete combo.\n\t//\n\t// In the event that there's a resolution for this particular conflict\n\t// it will be applied here by deleting the deletion event off the end of c\n\t// or by deleting the sets that conflicted with it off of c\n\tcheckConflict := func() {\n\t\tlast := len(c) - 1\n\t\tind, wasDeleted := deleted[c[last].UUID]\n\t\t// If we haven't been marked as deleted, and we're a delete event\n\t\t// mark it as such. If we haven't been marked as deleted before\n\t\t// and we're not a delete event, it doesn't matter.\n\t\tif !wasDeleted {\n\t\t\tif c[last].Kind != TxDelete {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Before we mark ourselves as deleted, make sure we aren't\n\t\t\t// part of a resolution.\n\t\t\tfor _, res := range resolved {\n\t\t\t\tif res.Initial.Time != c[last].Time {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// We are part of this resolution\n\t\t\t\tif res.resolution == resolveDiscardInitial {\n\t\t\t\t\t// We delete ourselves\n\t\t\t\t\tc = c[:last]\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdeleted[c[last].UUID] = last\n\t\t\treturn\n\t\t}\n\n\t\t// We've previously been deleted and have found an add/set operation of\n\t\t// some kind, this is a conflict.\n\t\tdeleteTx := c[ind]\n\t\t// Check if its resolved\n\t\tfor _, res := range resolved {\n\t\t\tif res.Initial.Time == deleteTx.Time {\n\t\t\t\t// Assert for the impossible, and delete ourselves off the end\n\t\t\t\t// This is impossible because if it was resolved in the other\n\t\t\t\t// way it should have been handled above.\n\t\t\t\tif res.resolution != resolveDiscardConflict {\n\t\t\t\t\tpanic(\"impossible situation\")\n\t\t\t\t}\n\t\t\t\tc = c[:last]\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Make sure we haven't noted this one already first\n\t\tfor _, con := range conflicts {\n\t\t\tif con.Initial.Time == deleteTx.Time {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Add it\n\t\tconflicts = append(conflicts, Conflict{\n\t\t\tKind: ConflictKindDeleteSet,\n\t\t\tInitial: deleteTx,\n\t\t\tConflict: c[last],\n\t\t})\n\t}\n\n\ti, j := 0, 0\n\tfor {\n\t\tif i >= lena || j >= lenb {\n\t\t\tbreak\n\t\t}\n\n\t\t// If ids are the same, append and move on, haven't reached fork\n\t\tif a[i].Time == b[j].Time {\n\t\t\tif a[i].Kind == TxDelete {\n\t\t\t\tdeleted[a[i].UUID] = i\n\t\t\t}\n\n\t\t\tc = append(c, a[i])\n\t\t\ti++\n\t\t\tj++\n\t\t\tcontinue\n\t\t}\n\n\t\t// We've forked.\n\t\t// If the fork happens and we have not moved either i or j\n\t\t// that means that there is no common ancestry and this is likely a\n\t\t// mistake to be syncing these. Create a conflict. 
This will always\n\t\t// be the first conflict.\n\t\tif i == 0 && j == 0 {\n\t\t\t// Check if it's been resolved\n\t\t\tif len(resolved) == 0 || resolved[0].resolution != resolveForce {\n\t\t\t\tconflicts = append(conflicts, Conflict{\n\t\t\t\t\tKind: ConflictKindRoot,\n\t\t\t\t\tInitial: a[i],\n\t\t\t\t\tConflict: b[j],\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\t// Compare the txs\n\t\tif a[i].Time < b[j].Time {\n\t\t\tc = append(c, a[i])\n\t\t\ti++\n\t\t} else {\n\t\t\tc = append(c, b[j])\n\t\t\tj++\n\t\t}\n\n\t\tcheckConflict()\n\t}\n\n\t// Append the rest of the events\n\tfor ; i < lena; i++ {\n\t\tc = append(c, a[i])\n\t\tcheckConflict()\n\t}\n\tfor ; j < lenb; j++ {\n\t\tc = append(c, b[j])\n\t\tcheckConflict()\n\t}\n\n\tif len(conflicts) != 0 {\n\t\treturn nil, conflicts\n\t}\n\n\treturn c, nil\n}", "func TestMerge3_Merge_fail(t *testing.T) {\n\t// TODO: make this test pass on windows -- currently failing due to comment whitespace changes\n\ttestutil.SkipWindows(t)\n\n\t_, datadir, _, ok := runtime.Caller(0)\n\tif !assert.True(t, ok) {\n\t\tt.FailNow()\n\t}\n\tdatadir = filepath.Join(filepath.Dir(datadir), \"testdata2\")\n\n\t// setup the local directory\n\tdir := t.TempDir()\n\n\tif !assert.NoError(t, copyutil.CopyDir(\n\t\tfilesys.MakeFsOnDisk(),\n\t\tfilepath.Join(datadir, \"dataset1-localupdates\"),\n\t\tfilepath.Join(dir, \"dataset1\"))) {\n\t\tt.FailNow()\n\t}\n\n\terr := filters.Merge3{\n\t\tOriginalPath: filepath.Join(datadir, \"dataset1\"),\n\t\tUpdatedPath: filepath.Join(datadir, \"dataset1-remoteupdates\"),\n\t\tDestPath: filepath.Join(dir, \"dataset1\"),\n\t\tMatcher: &filters.DefaultGVKNNMatcher{MergeOnPath: false},\n\t}.Merge()\n\tif !assert.Error(t, err) {\n\t\tt.FailNow()\n\t}\n}", "func createThreeWayJSONMergePatch(original, modified, current []byte) ([]byte, error) {\n\tif len(original) == 0 {\n\t\toriginal = []byte(`{}`)\n\t}\n\tif len(modified) == 0 {\n\t\tmodified = []byte(`{}`)\n\t}\n\tif len(current) == 0 {\n\t\tcurrent = []byte(`{}`)\n\t}\n\n\taddAndChange, err := jsonpatch.CreatePatch(current, modified)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error comparing current and desired state: %v\", err)\n\t}\n\t// Only keep addition and changes\n\taddAndChange = keepOrDeleteRemoveInPatch(addAndChange, false)\n\taddAndChange = filterBlacklistedPaths(addAndChange)\n\n\tdel, err := jsonpatch.CreatePatch(original, modified)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error comparing last applied and desired state: %v\", err)\n\t}\n\t// Only keep deletion\n\tdel = keepOrDeleteRemoveInPatch(del, true)\n\n\tpatch, err := mergePatchToJSON(del, addAndChange)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error merging patches: %v\", err)\n\t}\n\n\treturn patch, nil\n}", "func (g *Graph) FindLowestCommonAncestor(refs ...string) (*Object, error) {\n\tif len(refs) < 2 {\n\t\treturn nil, fmt.Errorf(\"Not enough references given to find ancestor: Found %v but need at least 2\", len(refs))\n\t}\n\n\t// Extract the right reference and process errors or inexistent references\n\tvar leftRef, found, err = g.ReferenceAdapter.ReadReference(refs[0])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error while reading reference %s\", refs[0])\n\t} else if !found {\n\t\treturn nil, fmt.Errorf(\"Cannot find reference %s\", refs[0])\n\t}\n\n\tvar leftID = leftRef.TargetID\n\tvar rightID ObjectID\n\n\tif len(refs) > 2 {\n\t\tvar recLeft, recErr = g.FindLowestCommonAncestor(refs[1:]...)\n\t\tif recErr != nil {\n\t\t\treturn nil, recErr\n\t\t} else if recLeft == nil {\n\t\t\treturn nil, 
fmt.Errorf(\"Cannot find lowest common ancestor\")\n\t\t} else {\n\t\t\trightID = recLeft.ID\n\t\t}\n\t} else {\n\t\tvar rightRef, found, refErr = g.ReferenceAdapter.ReadReference(refs[1])\n\t\tif refErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error while reading reference %s\", refs[1])\n\t\t} else if !found {\n\t\t\treturn nil, fmt.Errorf(\"Cannot find reference %s\", refs[1])\n\t\t}\n\t\trightID = rightRef.TargetID\n\t}\n\n\t// Parses the graph to a tree beginning at the specified id\n\tleftNodes, _, err := g.toTree(leftID)\n\n\t// Find all intersection object with leftNodes\n\t// Function that analyzes whether the given object represents a collision\n\tvar isCollision = func(obj *Object) bool {\n\t\tvar _, exists = leftNodes[obj.ID]\n\t\treturn exists\n\t}\n\n\t// Records the node where a collision happens\n\tvar collisions = []*tree.TreeNode{}\n\tvar collisionRecorder = func(node *tree.TreeNode) {\n\t\tcollisions = append(collisions, node)\n\t}\n\n\t_, _, err = g.toCollisionTerminatedTree(rightID, isCollision, collisionRecorder)\n\n\tvar shortestCollisionPoint ObjectID\n\tvar shortestCollisionPathLenght int64\n\tshortestCollisionPathLenght = math.MaxInt64\n\n\t// Iterate over all collisions and find the one with the shortest path length\n\tfor _, k := range collisions {\n\t\tvar id = ObjectID(k.ID.(ObjectID))\n\t\tvar totalPathLength = k.Depth + leftNodes[id].Depth\n\t\tif totalPathLength < shortestCollisionPathLenght {\n\t\t\tshortestCollisionPathLenght = totalPathLength\n\t\t\tshortestCollisionPoint = id\n\t\t}\n\t}\n\n\tobj, err := g.ObjectAdapter.ReadObject(shortestCollisionPoint[:])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot find object with id %#x\", shortestCollisionPoint[:4])\n\t}\n\n\treturn &obj, nil\n}", "func (cs *ColStore) makeWay(node1, node2 *ColStoreNode) {\n\tswitch {\n\tcase node1.Col.Max < node2.Col.Min:\n\t\t// The node2 starts after node1 ends, there's no overlap\n\t\t//\n\t\t// Node1 |----|\n\t\t// Node2 |----|\n\t\tif node1.Next != nil {\n\t\t\tif node1.Next.Col.Min <= node2.Col.Max {\n\t\t\t\tcs.makeWay(node1.Next, node2)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcs.addNode(node1, node2, node1.Next)\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(node1, node2, nil)\n\t\treturn\n\n\tcase node1.Col.Min > node2.Col.Max:\n\t\t// Node2 ends before node1 begins, there's no overlap\n\t\t//\n\t\t// Node1 |-----|\n\t\t// Node2 |----|\n\t\tif node1.Prev != nil {\n\t\t\tif node1.Prev.Col.Max >= node2.Col.Min {\n\t\t\t\tcs.makeWay(node1.Prev, node2)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcs.addNode(node1.Prev, node2, node1)\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(nil, node2, node1)\n\t\treturn\n\n\tcase node1.Col.Min == node2.Col.Min && node1.Col.Max == node2.Col.Max:\n\t\t// Exact match\n\t\t//\n\t\t// Node1 |xxx|\n\t\t// Node2 |---|\n\n\t\tprev := node1.Prev\n\t\tnext := node1.Next\n\t\tcs.removeNode(node1)\n\t\tcs.addNode(prev, node2, next)\n\t\t// Remove node may have set the root to nil\n\t\tif cs.Root == nil {\n\t\t\tcs.Root = node2\n\t\t}\n\t\treturn\n\n\tcase node1.Col.Min > node2.Col.Min && node1.Col.Max < node2.Col.Max:\n\t\t// Node2 envelopes node1\n\t\t//\n\t\t// Node1 |xx|\n\t\t// Node2 |----|\n\n\t\tprev := node1.Prev\n\t\tnext := node1.Next\n\t\tcs.removeNode(node1)\n\t\tswitch {\n\t\tcase prev == node2:\n\t\t\tnode2.Next = next\n\t\tcase next == node2:\n\t\t\tnode2.Prev = prev\n\t\tdefault:\n\t\t\tcs.addNode(prev, node2, next)\n\t\t}\n\n\t\tif node2.Prev != nil && node2.Prev.Col.Max >= node2.Col.Min {\n\t\t\tcs.makeWay(prev, node2)\n\t\t}\n\t\tif node2.Next != nil && 
node2.Next.Col.Min <= node2.Col.Max {\n\t\t\tcs.makeWay(next, node2)\n\t\t}\n\n\t\tif cs.Root == nil {\n\t\t\tcs.Root = node2\n\t\t}\n\n\tcase node1.Col.Min < node2.Col.Min && node1.Col.Max > node2.Col.Max:\n\t\t// Node2 bisects node1:\n\t\t//\n\t\t// Node1 |---xx---|\n\t\t// Node2 |--|\n\t\tnewCol := node1.Col.copyToRange(node2.Col.Max+1, node1.Col.Max)\n\t\tnewNode := &ColStoreNode{Col: newCol}\n\t\tcs.addNode(node1, newNode, node1.Next)\n\t\tnode1.Col.Max = node2.Col.Min - 1\n\t\tcs.addNode(node1, node2, newNode)\n\t\treturn\n\n\tcase node1.Col.Max >= node2.Col.Min && node1.Col.Min < node2.Col.Min:\n\t\t// Node2 overlaps node1 at some point above it's minimum:\n\t\t//\n\t\t// Node1 |----xx|\n\t\t// Node2 |-------|\n\t\tnext := node1.Next\n\t\tnode1.Col.Max = node2.Col.Min - 1\n\t\tif next == node2 {\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(node1, node2, next)\n\t\tif next != nil && next.Col.Min <= node2.Col.Max {\n\t\t\tcs.makeWay(next, node2)\n\t\t}\n\t\treturn\n\n\tcase node1.Col.Min <= node2.Col.Max && node1.Col.Min > node2.Col.Min:\n\t\t// Node2 overlaps node1 at some point below it's maximum:\n\t\t//\n\t\t// Node1: |------|\n\t\t// Node2: |----xx|\n\t\tprev := node1.Prev\n\t\tnode1.Col.Min = node2.Col.Max + 1\n\t\tif prev == node2 {\n\t\t\treturn\n\t\t}\n\t\tcs.addNode(prev, node2, node1)\n\t\tif prev != nil && prev.Col.Max >= node2.Col.Min {\n\t\t\tcs.makeWay(node1.Prev, node2)\n\t\t}\n\t\treturn\n\t}\n\treturn\n}", "func TestLedger_SetConflictConfirmed(t *testing.T) {\n\tworkers := workerpool.NewGroup(t.Name())\n\ttf := realitiesledger.NewDefaultTestFramework(t, workers.CreateGroup(\"LedgerTestFramework\"))\n\n\t// Step 1: Bottom Layer\n\ttf.CreateTransaction(\"G\", 3, \"Genesis\")\n\ttf.CreateTransaction(\"TXA\", 1, \"G.0\")\n\ttf.CreateTransaction(\"TXB\", 1, \"G.0\")\n\ttf.CreateTransaction(\"TXC\", 1, \"G.1\")\n\ttf.CreateTransaction(\"TXD\", 1, \"G.1\")\n\ttf.CreateTransaction(\"TXH\", 1, \"G.2\")\n\ttf.CreateTransaction(\"TXI\", 1, \"G.2\")\n\t// Step 2: Middle Layer\n\ttf.CreateTransaction(\"TXE\", 1, \"TXA.0\", \"TXC.0\")\n\t// Step 3: Top Layer\n\ttf.CreateTransaction(\"TXF\", 1, \"TXE.0\")\n\t// Step 4: Top Layer\n\ttf.CreateTransaction(\"TXG\", 1, \"TXE.0\")\n\t// Step 5: TopTop Layer\n\ttf.CreateTransaction(\"TXL\", 1, \"TXG.0\", \"TXH.0\")\n\t// Step 6: TopTopTOP Layer\n\ttf.CreateTransaction(\"TXM\", 1, \"TXL.0\")\n\n\t// Mark A as Confirmed\n\t{\n\t\trequire.NoError(t, tf.IssueTransactions(\"G\", \"TXA\", \"TXB\", \"TXC\", \"TXD\", \"TXH\", \"TXI\"))\n\t\trequire.True(t, tf.Instance.ConflictDAG().SetConflictAccepted(tf.Transaction(\"TXA\").ID()))\n\n\t\ttf.AssertConflictIDs(map[string][]string{\n\t\t\t\"G\": {},\n\t\t\t\"TXA\": {\"TXA\"},\n\t\t\t\"TXB\": {\"TXB\"},\n\t\t\t\"TXC\": {\"TXC\"},\n\t\t\t\"TXD\": {\"TXD\"},\n\t\t\t\"TXH\": {\"TXH\"},\n\t\t\t\"TXI\": {\"TXI\"},\n\t\t})\n\n\t\ttf.AssertConflictDAG(map[string][]string{\n\t\t\t\"TXA\": {},\n\t\t\t\"TXB\": {},\n\t\t\t\"TXC\": {},\n\t\t\t\"TXD\": {},\n\t\t\t\"TXH\": {},\n\t\t\t\"TXI\": {},\n\t\t})\n\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXA\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXB\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXC\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXD\")))\n\t\trequire.Equal(t, confirmation.Pending, 
tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXH\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXI\")))\n\t}\n\n\t// When creating the middle layer the new transaction E should be booked only under its Pending parent C\n\t{\n\t\trequire.NoError(t, tf.IssueTransactions(\"TXE\"))\n\n\t\ttf.AssertConflictIDs(map[string][]string{\n\t\t\t\"G\": {},\n\t\t\t\"TXA\": {\"TXA\"},\n\t\t\t\"TXB\": {\"TXB\"},\n\t\t\t\"TXC\": {\"TXC\"},\n\t\t\t\"TXD\": {\"TXD\"},\n\t\t\t\"TXH\": {\"TXH\"},\n\t\t\t\"TXI\": {\"TXI\"},\n\t\t\t\"TXE\": {\"TXC\"},\n\t\t})\n\n\t\ttf.AssertConflictDAG(map[string][]string{\n\t\t\t\"TXA\": {},\n\t\t\t\"TXB\": {},\n\t\t\t\"TXC\": {},\n\t\t\t\"TXD\": {},\n\t\t\t\"TXH\": {},\n\t\t\t\"TXI\": {},\n\t\t})\n\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXA\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXB\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXC\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXD\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXH\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXI\")))\n\t}\n\n\t// When creating the first transaction (F) of top layer it should be booked under the Pending parent C\n\t{\n\t\trequire.NoError(t, tf.IssueTransactions(\"TXF\"))\n\n\t\ttf.AssertConflictIDs(map[string][]string{\n\t\t\t\"G\": {},\n\t\t\t\"TXA\": {\"TXA\"},\n\t\t\t\"TXB\": {\"TXB\"},\n\t\t\t\"TXC\": {\"TXC\"},\n\t\t\t\"TXD\": {\"TXD\"},\n\t\t\t\"TXH\": {\"TXH\"},\n\t\t\t\"TXI\": {\"TXI\"},\n\t\t\t// Conflicts F & G are spawned by the fork of G\n\t\t\t\"TXF\": {\"TXC\"},\n\t\t})\n\n\t\ttf.AssertConflictDAG(map[string][]string{\n\t\t\t\"TXA\": {},\n\t\t\t\"TXB\": {},\n\t\t\t\"TXC\": {},\n\t\t\t\"TXD\": {},\n\t\t\t\"TXH\": {},\n\t\t\t\"TXI\": {},\n\t\t})\n\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXA\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXB\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXC\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXD\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXH\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXI\")))\n\t}\n\n\t// When creating the conflicting TX (G) of the top layer conflicts F & G are spawned by the fork of G\n\t{\n\t\trequire.NoError(t, tf.IssueTransactions(\"TXG\"))\n\n\t\ttf.AssertConflictIDs(map[string][]string{\n\t\t\t\"G\": {},\n\t\t\t\"TXA\": {\"TXA\"},\n\t\t\t\"TXB\": {\"TXB\"},\n\t\t\t\"TXC\": {\"TXC\"},\n\t\t\t\"TXD\": {\"TXD\"},\n\t\t\t\"TXH\": {\"TXH\"},\n\t\t\t\"TXI\": {\"TXI\"},\n\t\t\t// Conflicts F & G are spawned by the fork of G\n\t\t\t\"TXF\": {\"TXF\"},\n\t\t\t\"TXG\": {\"TXG\"},\n\t\t})\n\n\t\ttf.AssertConflictDAG(map[string][]string{\n\t\t\t\"TXA\": {},\n\t\t\t\"TXB\": {},\n\t\t\t\"TXC\": {},\n\t\t\t\"TXD\": {},\n\t\t\t\"TXH\": {},\n\t\t\t\"TXI\": {},\n\t\t\t\"TXF\": 
{\"TXC\"},\n\t\t\t\"TXG\": {\"TXC\"},\n\t\t})\n\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXA\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXB\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXC\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXD\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXH\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXI\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXF\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXG\")))\n\t}\n\n\trequire.True(t, tf.Instance.ConflictDAG().SetConflictAccepted(tf.Transaction(\"TXD\").ID()))\n\n\t// TX L combines a child (G) of a Rejected conflict (C) and a pending conflict H, resulting in (G,H)\n\t{\n\t\trequire.NoError(t, tf.IssueTransactions(\"TXL\"))\n\n\t\ttf.AssertConflictIDs(map[string][]string{\n\t\t\t\"G\": {},\n\t\t\t\"TXA\": {\"TXA\"},\n\t\t\t\"TXB\": {\"TXB\"},\n\t\t\t\"TXC\": {\"TXC\"},\n\t\t\t\"TXD\": {\"TXD\"},\n\t\t\t\"TXH\": {\"TXH\"},\n\t\t\t\"TXI\": {\"TXI\"},\n\t\t\t// Conflicts F & G are spawned by the fork of G\n\t\t\t\"TXF\": {\"TXF\"},\n\t\t\t\"TXG\": {\"TXG\"},\n\t\t\t\"TXL\": {\"TXG\", \"TXH\"},\n\t\t})\n\n\t\ttf.AssertConflictDAG(map[string][]string{\n\t\t\t\"TXA\": {},\n\t\t\t\"TXB\": {},\n\t\t\t\"TXC\": {},\n\t\t\t\"TXD\": {},\n\t\t\t\"TXH\": {},\n\t\t\t\"TXI\": {},\n\t\t\t\"TXF\": {\"TXC\"},\n\t\t\t\"TXG\": {\"TXC\"},\n\t\t})\n\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXA\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXC\")))\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXD\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXH\")))\n\t\trequire.Equal(t, confirmation.Pending, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXI\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXF\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXG\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXG\", \"TXH\")))\n\t}\n\n\trequire.True(t, tf.Instance.ConflictDAG().SetConflictAccepted(tf.Transaction(\"TXH\").ID()))\n\n\t// The new TX M should be now booked under G, as conflict H confirmed, just G because we don't propagate H further.\n\t{\n\t\trequire.NoError(t, tf.IssueTransactions(\"TXM\"))\n\n\t\ttf.AssertConflictIDs(map[string][]string{\n\t\t\t\"G\": {},\n\t\t\t\"TXA\": {\"TXA\"},\n\t\t\t\"TXB\": {\"TXB\"},\n\t\t\t\"TXC\": {\"TXC\"},\n\t\t\t\"TXD\": {\"TXD\"},\n\t\t\t\"TXH\": {\"TXH\"},\n\t\t\t\"TXI\": {\"TXI\"},\n\t\t\t// Conflicts F & G are spawned by the fork of G\n\t\t\t\"TXF\": {\"TXF\"},\n\t\t\t\"TXG\": {\"TXG\"},\n\t\t\t\"TXL\": {\"TXG\", \"TXH\"},\n\t\t\t\"TXM\": {\"TXG\"},\n\t\t})\n\n\t\ttf.AssertConflictDAG(map[string][]string{\n\t\t\t\"TXA\": {},\n\t\t\t\"TXB\": 
{},\n\t\t\t\"TXC\": {},\n\t\t\t\"TXD\": {},\n\t\t\t\"TXH\": {},\n\t\t\t\"TXI\": {},\n\t\t\t\"TXF\": {\"TXC\"},\n\t\t\t\"TXG\": {\"TXC\"},\n\t\t})\n\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXA\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXB\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXC\")))\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXD\")))\n\t\trequire.Equal(t, confirmation.Accepted, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXH\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXI\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXF\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXG\")))\n\t\trequire.Equal(t, confirmation.Rejected, tf.Instance.ConflictDAG().ConfirmationState(tf.ConflictIDs(\"TXG\", \"TXH\")))\n\t}\n}", "func checkConflictingNodes(ctx context.Context, client client.Interface, node *libapi.Node) (v4conflict, v6conflict bool, retErr error) {\n\t// Get the full set of nodes.\n\tvar nodes []libapi.Node\n\tif nodeList, err := client.Nodes().List(ctx, options.ListOptions{}); err != nil {\n\t\tlog.WithError(err).Errorf(\"Unable to query node configuration\")\n\t\tretErr = err\n\t\treturn\n\t} else {\n\t\tnodes = nodeList.Items\n\t}\n\n\tourIPv4, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv4Address)\n\tif err != nil && node.Spec.BGP.IPv4Address != \"\" {\n\t\tlog.WithError(err).Errorf(\"Error parsing IPv4 CIDR '%s' for node '%s'\", node.Spec.BGP.IPv4Address, node.Name)\n\t\tretErr = err\n\t\treturn\n\t}\n\tourIPv6, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv6Address)\n\tif err != nil && node.Spec.BGP.IPv6Address != \"\" {\n\t\tlog.WithError(err).Errorf(\"Error parsing IPv6 CIDR '%s' for node '%s'\", node.Spec.BGP.IPv6Address, node.Name)\n\t\tretErr = err\n\t\treturn\n\t}\n\n\tfor _, theirNode := range nodes {\n\t\tif theirNode.Spec.BGP == nil {\n\t\t\t// Skip nodes that don't have BGP configured. We know\n\t\t\t// that this node does have BGP since we only perform\n\t\t\t// this check after configuring BGP.\n\t\t\tcontinue\n\t\t}\n\n\t\ttheirIPv4, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv4Address)\n\t\tif err != nil && theirNode.Spec.BGP.IPv4Address != \"\" {\n\t\t\tlog.WithError(err).Errorf(\"Error parsing IPv4 CIDR '%s' for node '%s'\", theirNode.Spec.BGP.IPv4Address, theirNode.Name)\n\t\t\tretErr = err\n\t\t\treturn\n\t\t}\n\n\t\ttheirIPv6, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv6Address)\n\t\tif err != nil && theirNode.Spec.BGP.IPv6Address != \"\" {\n\t\t\tlog.WithError(err).Errorf(\"Error parsing IPv6 CIDR '%s' for node '%s'\", theirNode.Spec.BGP.IPv6Address, theirNode.Name)\n\t\t\tretErr = err\n\t\t\treturn\n\t\t}\n\n\t\t// If this is our node (based on the name), check if the IP\n\t\t// addresses have changed. If so warn the user as it could be\n\t\t// an indication of multiple nodes using the same name. 
This\n\t\t// is not an error condition as the IPs could actually change.\n\t\tif theirNode.Name == node.Name {\n\t\t\tif theirIPv4.IP != nil && ourIPv4.IP != nil && !theirIPv4.IP.Equal(ourIPv4.IP) {\n\t\t\t\tfields := log.Fields{\"node\": theirNode.Name, \"original\": theirIPv4.String(), \"updated\": ourIPv4.String()}\n\t\t\t\tlog.WithFields(fields).Warnf(\"IPv4 address has changed. This could happen if there are multiple nodes with the same name.\")\n\t\t\t}\n\t\t\tif theirIPv6.IP != nil && ourIPv6.IP != nil && !theirIPv6.IP.Equal(ourIPv6.IP) {\n\t\t\t\tfields := log.Fields{\"node\": theirNode.Name, \"original\": theirIPv6.String(), \"updated\": ourIPv6.String()}\n\t\t\t\tlog.WithFields(fields).Warnf(\"IPv6 address has changed. This could happen if there are multiple nodes with the same name.\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that other nodes aren't using the same IP addresses.\n\t\t// This is an error condition.\n\t\tif theirIPv4.IP != nil && ourIPv4.IP != nil && theirIPv4.IP.Equal(ourIPv4.IP) {\n\t\t\tlog.Warnf(\"Calico node '%s' is already using the IPv4 address %s.\", theirNode.Name, ourIPv4.String())\n\t\t\tretErr = fmt.Errorf(\"IPv4 address conflict\")\n\t\t\tv4conflict = true\n\t\t}\n\n\t\tif theirIPv6.IP != nil && ourIPv6.IP != nil && theirIPv6.IP.Equal(ourIPv6.IP) {\n\t\t\tlog.Warnf(\"Calico node '%s' is already using the IPv6 address %s.\", theirNode.Name, ourIPv6.String())\n\t\t\tretErr = fmt.Errorf(\"IPv6 address conflict\")\n\t\t\tv6conflict = true\n\t\t}\n\t}\n\treturn\n}", "func (s *mergeBaseSuite) TestAncestorBeyondMerges(c *C) {\n\trevs := []string{\"M\", \"G\"}\n\ts.AssertAncestor(c, revs, true)\n\n\trevs = []string{\"G\", \"M\"}\n\ts.AssertAncestor(c, revs, false)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a and b cannot be merged if they are of different NomsKind, if at least one of the two is nil, or if either is a Noms primitive.
func unmergeable(a, b types.Value) bool { if a != nil && b != nil { aKind, bKind := a.Type().Kind(), b.Type().Kind() return aKind != bKind || types.IsPrimitiveKind(aKind) || types.IsPrimitiveKind(bKind) } return true }
[ "func merge(old, a, b interface{}) (merged interface{}, conflicts bool) {\n\tif reflect.DeepEqual(a, b) {\n\t\treturn a, false\n\t}\n\tvold, va, vb := reflect.ValueOf(old), reflect.ValueOf(a), reflect.ValueOf(b)\n\ttold, ta, tb := reflect.TypeOf(old), reflect.TypeOf(a), reflect.TypeOf(b)\n\tif !isSameType(ta, tb) {\n\t\tif reflect.DeepEqual(a, old) {\n\t\t\treturn b, false\n\t\t} else if reflect.DeepEqual(b, old) {\n\t\t\treturn a, false\n\t\t}\n\t\treturn &mergeConflict{a, b}, true\n\t}\n\tif a == nil {\n\t\treturn nil, false\n\t}\n\tswitch ta.Kind() {\n\tcase reflect.Bool, reflect.String, reflect.Float64:\n\t\tif isSameType(ta, told) {\n\t\t\tif a == old {\n\t\t\t\treturn b, false\n\t\t\t} else if b == old {\n\t\t\t\treturn a, false\n\t\t\t}\n\t\t}\n\tcase reflect.Slice:\n\t\tif isSameType(ta, told) {\n\t\t\tif reflect.DeepEqual(a, old) {\n\t\t\t\treturn b, false\n\t\t\t} else if reflect.DeepEqual(b, old) {\n\t\t\t\treturn a, false\n\t\t\t}\n\t\t}\n\tcase reflect.Map:\n\t\tkold, ka, kb := make(StringSet, 0), getStringKeys(va), getStringKeys(vb)\n\t\tif told != nil && told.Kind() == reflect.Map {\n\t\t\tkold = getStringKeys(vold)\n\t\t}\n\t\taddA, remA := getAddRemoveKeys(kold, ka)\n\t\taddB, remB := getAddRemoveKeys(kold, kb)\n\t\tresult := make(map[string]interface{})\n\t\tfor k := range ka.Intersect(kb) {\n\t\t\tvar c bool\n\t\t\tresult[k], c = merge(mapIndex(vold, k), mapIndex(va, k), mapIndex(vb, k))\n\t\t\tif c {\n\t\t\t\tconflicts = true\n\t\t\t}\n\t\t}\n\t\tfor k := range addA.Subtract(kb) {\n\t\t\tresult[k] = mapIndex(va, k)\n\t\t}\n\t\tfor k := range addB.Subtract(ka) {\n\t\t\tresult[k] = mapIndex(vb, k)\n\t\t}\n\t\tfor k := range remA {\n\t\t\tif _, ok := kb[k]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toldIdx, bIdx := mapIndex(vold, k), mapIndex(vb, k)\n\t\t\tif !reflect.DeepEqual(oldIdx, bIdx) {\n\t\t\t\tresult[k] = &mergeConflict{nil, bIdx}\n\t\t\t\tconflicts = true\n\t\t\t}\n\t\t}\n\t\tfor k := range remB {\n\t\t\tif _, ok := ka[k]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toldIdx, aIdx := mapIndex(vold, k), mapIndex(va, k)\n\t\t\tif !reflect.DeepEqual(oldIdx, aIdx) {\n\t\t\t\tresult[k] = &mergeConflict{aIdx, nil}\n\t\t\t\tconflicts = true\n\t\t\t}\n\t\t}\n\t\treturn result, conflicts\n\t}\n\treturn &mergeConflict{a, b}, true\n}", "func canMerge(a, b *jointRequest) bool {\n\tif !reflect.DeepEqual(a.tileConfig, b.tileConfig) {\n\t\treturn false\n\t}\n\tif !reflect.DeepEqual(a.query, b.query) {\n\t\treturn false\n\t}\n\treturn a.dataset == b.dataset\n}", "func mergeAllowedMentions(a, b discordgo.AllowedMentions) discordgo.AllowedMentions {\n\t// merge mention types\nOUTER:\n\tfor _, v := range b.Parse {\n\t\tfor _, av := range a.Parse {\n\t\t\tif v == av {\n\t\t\t\tcontinue OUTER\n\t\t\t}\n\t\t}\n\n\t\ta.Parse = append(a.Parse, v)\n\t\tswitch v {\n\t\tcase discordgo.AllowedMentionTypeUsers:\n\t\t\ta.Users = nil\n\t\t\tb.Users = nil\n\t\tcase discordgo.AllowedMentionTypeRoles:\n\t\t\ta.Roles = nil\n\t\t\tb.Roles = nil\n\t\t}\n\t}\n\n\thasParseRoles := false\n\thasParseUsers := false\n\tfor _, p := range a.Parse {\n\t\tswitch p {\n\t\tcase discordgo.AllowedMentionTypeRoles:\n\t\t\thasParseRoles = true\n\t\tcase discordgo.AllowedMentionTypeUsers:\n\t\t\thasParseUsers = true\n\t\t}\n\t}\n\n\t// merge mentioned roles\n\tif !hasParseRoles {\n\tOUTER2:\n\t\tfor _, v := range b.Roles {\n\t\t\tfor _, av := range a.Roles {\n\t\t\t\tif v == av {\n\t\t\t\t\tcontinue OUTER2\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ta.Roles = append(a.Roles, v)\n\t\t}\n\t}\n\n\t// merge mentioned users\n\tif 
!hasParseUsers {\n\tOUTER3:\n\t\tfor _, v := range b.Users {\n\t\t\tfor _, av := range a.Users {\n\t\t\t\tif v == av {\n\t\t\t\t\tcontinue OUTER3\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ta.Users = append(a.Users, v)\n\t\t}\n\t}\n\n\treturn a\n}", "func (a Possibility) Union(b Possibility) Possibility {\n\tif a == Impossible || b == Impossible {\n\t\treturn Impossible\n\t}\n\tif a.Equals(b) == True {\n\t\treturn a\n\t}\n\treturn Maybe\n}", "func mergeIfMergable(obj reflect.Value, src reflect.Value) (reflect.Value, bool) {\n\tvar out reflect.Value\n\n\t// Look for the .WithDefaults method.\n\tmeth, ok := obj.Type().MethodByName(\"Merge\")\n\tif !ok {\n\t\treturn out, false\n\t}\n\n\t// Verify the signature matches our Mergable psuedointerface:\n\t// - two inputs (the receiver), and one output\n\t// - input types match output type exactly (disallow the usual pointer receiver semantics)\n\tif meth.Type.NumIn() != 2 || meth.Type.NumOut() != 1 {\n\t\treturn out, false\n\t}\n\tif meth.Type.In(0) != meth.Type.In(1) || meth.Type.In(0) != meth.Type.Out(0) {\n\t\treturn out, false\n\t}\n\n\t// Psuedointerface matches, call the .Merge method.\n\tout = meth.Func.Call([]reflect.Value{obj, src})[0]\n\n\treturn out, true\n}", "func compat(a, b *Type, seenA, seenB map[*Type]bool) bool { //nolint:gocyclo\n\t// Normalize and break cycles from recursive types.\n\ta, b = a.NonOptional(), b.NonOptional()\n\tif a == b || seenA[a] || seenB[b] {\n\t\treturn true\n\t}\n\tseenA[a], seenB[b] = true, true\n\t// Handle Any\n\tif a.Kind() == Any || b.Kind() == Any {\n\t\treturn true\n\t}\n\t// Handle simple scalars\n\tif ax, bx := a.Kind() == Bool, b.Kind() == Bool; ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := ttIsStringEnum(a), ttIsStringEnum(b); ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := a.Kind().IsNumber(), b.Kind().IsNumber(); ax || bx {\n\t\treturn ax && bx\n\t}\n\tif ax, bx := a.Kind() == TypeObject, b.Kind() == TypeObject; ax || bx {\n\t\treturn ax && bx\n\t}\n\t// Handle composites\n\tswitch a.Kind() {\n\tcase Array, List:\n\t\tswitch b.Kind() {\n\t\tcase Array, List:\n\t\t\treturn compat(a.Elem(), b.Elem(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Set:\n\t\tif b.Kind() == Set {\n\t\t\treturn compat(a.Key(), b.Key(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Map:\n\t\tif b.Kind() == Map {\n\t\t\treturn compat(a.Key(), b.Key(), seenA, seenB) && compat(a.Elem(), b.Elem(), seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Struct:\n\t\tif b.Kind() == Struct {\n\t\t\tif ttIsEmptyStruct(a) || ttIsEmptyStruct(b) {\n\t\t\t\treturn true // empty struct is compatible with all other structs\n\t\t\t}\n\t\t\treturn compatFields(a, b, seenA, seenB)\n\t\t}\n\t\treturn false\n\tcase Union:\n\t\tif b.Kind() == Union {\n\t\t\treturn compatFields(a, b, seenA, seenB)\n\t\t}\n\t\treturn false\n\tdefault:\n\t\tpanic(fmt.Errorf(\"vdl: Compatible unhandled types %q %q\", a, b))\n\t}\n}", "func nonBinaryMergeFn(ctx context.Context, a, b int) (int, error) {\n\treturn a + b, nil\n}", "func TestMerge(t *testing.T) {\n\ta := NewEvent(\"1970-01-01T00:00:00Z\", map[int64]interface{}{-1: int64(30), -2: \"foo\"})\n\tb := NewEvent(\"1970-01-01T00:00:00Z\", map[int64]interface{}{-1: 20, 3: \"baz\"})\n\ta.Merge(b)\n\tif a.Data[-1] != 20 || a.Data[-2] != \"foo\" || a.Data[3] != \"baz\" {\n\t\tt.Fatalf(\"Invalid merge: %v\", a.Data)\n\t}\n}", "func (a *API) Merge(other API) {\n\tif a.Short == \"\" {\n\t\ta.Short = other.Short\n\t}\n\n\tif a.Long == \"\" {\n\t\ta.Long = other.Long\n\t}\n\n\ta.Operations = append(a.Operations, 
other.Operations...)\n}", "func mergeTypeFlag(a, b uint) uint {\n\treturn a & (b&mysql.NotNullFlag | ^mysql.NotNullFlag) & (b&mysql.UnsignedFlag | ^mysql.UnsignedFlag)\n}", "func merge(o, n *Entry) *Entry {\n\tif o.Type != n.Type {\n\t\tpanic(fmt.Errorf(\"type mismatch for entry merge: %v vs %v\", o.Type, n.Type))\n\t}\n\tif o.RawName != n.RawName {\n\t\tpanic(fmt.Errorf(\"raw name mismatch for entry merge: %v vs %v\", o.RawName, n.RawName))\n\t}\n\n\t//deal with return type\n\tvar mreturn string\n\tif o.Return == n.Return {\n\t\tmreturn = o.Return\n\t} else {\n\t\tlog.Printf(\"merging return type for %v (%v <> %v)\", o.RawName, o.Return, n.Return)\n\t\tmreturn = \"Object\" // by default\n\n\t}\n\n\treturn &Entry{\n\t\tType: o.Type, // they must have the same type\n\t\tRawName: o.RawName,\n\t\tReturn: mreturn,\n\t\tDesc: o.Desc + \"\\n// OR\\n// \" + n.Desc,\n\t\tSignature: append(append(make([]Signature, 0, 10), o.Signature...), n.Signature...),\n\t}\n\n}", "func merge(base, patch reflect.Value, mergeConfig *MergeConfig) (reflect.Value, bool) {\n\tcommonType := base.Type()\n\n\tswitch commonType.Kind() {\n\tcase reflect.Struct:\n\t\tmerged := reflect.New(commonType).Elem()\n\t\tfor i := 0; i < base.NumField(); i++ {\n\t\t\tif !merged.Field(i).CanSet() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif mergeConfig != nil && mergeConfig.StructFieldFilter != nil {\n\t\t\t\tif !mergeConfig.StructFieldFilter(commonType.Field(i), base.Field(i), patch.Field(i)) {\n\t\t\t\t\tmerged.Field(i).Set(base.Field(i))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tval, ok := merge(base.Field(i), patch.Field(i), mergeConfig)\n\t\t\tif ok {\n\t\t\t\tmerged.Field(i).Set(val)\n\t\t\t}\n\t\t}\n\t\treturn merged, true\n\n\tcase reflect.Ptr:\n\t\tmergedPtr := reflect.New(commonType.Elem())\n\t\tif base.IsNil() && patch.IsNil() {\n\t\t\treturn mergedPtr, false\n\t\t}\n\n\t\t// clone reference values (if any)\n\t\tif base.IsNil() {\n\t\t\tval, _ := merge(patch.Elem(), patch.Elem(), mergeConfig)\n\t\t\tmergedPtr.Elem().Set(val)\n\t\t} else if patch.IsNil() {\n\t\t\tval, _ := merge(base.Elem(), base.Elem(), mergeConfig)\n\t\t\tmergedPtr.Elem().Set(val)\n\t\t} else {\n\t\t\tval, _ := merge(base.Elem(), patch.Elem(), mergeConfig)\n\t\t\tmergedPtr.Elem().Set(val)\n\t\t}\n\t\treturn mergedPtr, true\n\n\tcase reflect.Slice:\n\t\tif base.IsNil() && patch.IsNil() {\n\t\t\treturn reflect.Zero(commonType), false\n\t\t}\n\t\tif !patch.IsNil() {\n\t\t\t// use patch\n\t\t\tmerged := reflect.MakeSlice(commonType, 0, patch.Len())\n\t\t\tfor i := 0; i < patch.Len(); i++ {\n\t\t\t\t// recursively merge patch with itself. This will clone reference values.\n\t\t\t\tval, _ := merge(patch.Index(i), patch.Index(i), mergeConfig)\n\t\t\t\tmerged = reflect.Append(merged, val)\n\t\t\t}\n\t\t\treturn merged, true\n\t\t}\n\t\t// use base\n\t\tmerged := reflect.MakeSlice(commonType, 0, base.Len())\n\t\tfor i := 0; i < base.Len(); i++ {\n\n\t\t\t// recursively merge base with itself. This will clone reference values.\n\t\t\tval, _ := merge(base.Index(i), base.Index(i), mergeConfig)\n\t\t\tmerged = reflect.Append(merged, val)\n\t\t}\n\t\treturn merged, true\n\n\tcase reflect.Map:\n\t\t// maps are merged according to these rules:\n\t\t// - if patch is not nil, replace the base map completely\n\t\t// - otherwise, keep the base map\n\t\t// - reference values (eg. 
slice/ptr/map) will be cloned\n\t\tif base.IsNil() && patch.IsNil() {\n\t\t\treturn reflect.Zero(commonType), false\n\t\t}\n\t\tmerged := reflect.MakeMap(commonType)\n\t\tmapPtr := base\n\t\tif !patch.IsNil() {\n\t\t\tmapPtr = patch\n\t\t}\n\t\tfor _, key := range mapPtr.MapKeys() {\n\t\t\t// clone reference values\n\t\t\tval, ok := merge(mapPtr.MapIndex(key), mapPtr.MapIndex(key), mergeConfig)\n\t\t\tif !ok {\n\t\t\t\tval = reflect.New(mapPtr.MapIndex(key).Type()).Elem()\n\t\t\t}\n\t\t\tmerged.SetMapIndex(key, val)\n\t\t}\n\t\treturn merged, true\n\n\tcase reflect.Interface:\n\t\tvar val reflect.Value\n\t\tif base.IsNil() && patch.IsNil() {\n\t\t\treturn reflect.Zero(commonType), false\n\t\t}\n\n\t\t// clone reference values (if any)\n\t\tif base.IsNil() {\n\t\t\tval, _ = merge(patch.Elem(), patch.Elem(), mergeConfig)\n\t\t} else if patch.IsNil() {\n\t\t\tval, _ = merge(base.Elem(), base.Elem(), mergeConfig)\n\t\t} else {\n\t\t\tval, _ = merge(base.Elem(), patch.Elem(), mergeConfig)\n\t\t}\n\t\treturn val, true\n\n\tdefault:\n\t\treturn patch, true\n\t}\n}", "func (*MergeEmptyMaps) IsMergeOpt() {}", "func (*MergeOverwriteExistingFields) IsMergeOpt() {}", "func MergeNetem(a, b *chaosdaemon.Netem) *chaosdaemon.Netem {\n\tif a == nil && b == nil {\n\t\treturn nil\n\t}\n\t// NOTE: because proto getters check nil, we are good here even if one of them is nil.\n\t// But we just assign empty value to make IDE and linters happy.\n\tif a == nil {\n\t\ta = &chaosdaemon.Netem{}\n\t}\n\tif b == nil {\n\t\tb = &chaosdaemon.Netem{}\n\t}\n\treturn &chaosdaemon.Netem{\n\t\tTime: maxu32(a.GetTime(), b.GetTime()),\n\t\tJitter: maxu32(a.GetJitter(), b.GetJitter()),\n\t\tDelayCorr: maxf32(a.GetDelayCorr(), b.GetDelayCorr()),\n\t\tLimit: maxu32(a.GetLimit(), b.GetLimit()),\n\t\tLoss: maxf32(a.GetLoss(), b.GetLoss()),\n\t\tLossCorr: maxf32(a.GetLossCorr(), b.GetLossCorr()),\n\t\tGap: maxu32(a.GetGap(), b.GetGap()),\n\t\tDuplicate: maxf32(a.GetDuplicate(), b.GetDuplicate()),\n\t\tDuplicateCorr: maxf32(a.GetDuplicateCorr(), b.GetDuplicateCorr()),\n\t\tReorder: maxf32(a.GetReorder(), b.GetReorder()),\n\t\tReorderCorr: maxf32(a.GetReorderCorr(), b.GetReorderCorr()),\n\t\tCorrupt: maxf32(a.GetCorrupt(), b.GetCorrupt()),\n\t\tCorruptCorr: maxf32(a.GetCorruptCorr(), b.GetCorruptCorr()),\n\t}\n}", "func NeedsMerge(left, right *rtree.Range, splitSizeBytes, splitKeyCount uint64) bool {\n\tleftBytes, leftKeys := left.BytesAndKeys()\n\trightBytes, rightKeys := right.BytesAndKeys()\n\tif rightBytes == 0 {\n\t\treturn true\n\t}\n\tif leftBytes+rightBytes > splitSizeBytes {\n\t\treturn false\n\t}\n\tif leftKeys+rightKeys > splitKeyCount {\n\t\treturn false\n\t}\n\ttableID1, indexID1, isRecord1, err1 := tablecodec.DecodeKeyHead(kv.Key(left.StartKey))\n\ttableID2, indexID2, isRecord2, err2 := tablecodec.DecodeKeyHead(kv.Key(right.StartKey))\n\n\t// Failed to decode the file key head... 
can this happen?\n\tif err1 != nil || err2 != nil {\n\t\tlog.Warn(\"Failed to parse the key head for merging files, skipping\",\n\t\t\tlogutil.Key(\"left-start-key\", left.StartKey),\n\t\t\tlogutil.Key(\"right-start-key\", right.StartKey),\n\t\t\tlogutil.AShortError(\"left-err\", err1),\n\t\t\tlogutil.AShortError(\"right-err\", err2),\n\t\t)\n\t\treturn false\n\t}\n\t// Merge if they are both record keys\n\tif isRecord1 && isRecord2 {\n\t\t// Do not merge ranges in different tables.\n\t\treturn tableID1 == tableID2\n\t}\n\t// If they are all index keys...\n\tif !isRecord1 && !isRecord2 {\n\t\t// Do not merge ranges in different indexes even if they are in the same\n\t\t// table, as rewrite rule only supports rewriting one pattern.\n\t\t// Merge left and right if they are in the same index.\n\t\treturn tableID1 == tableID2 && indexID1 == indexID2\n\t}\n\treturn false\n}", "func checkIncompatibleStructsCast(src types.Type, dst types.Type) bool {\n\t// check if the source type is a struct\n\tsrcStruct, ok := src.(*types.Struct)\n\tif !ok {\n\t\treturn false\n\t}\n\n\t// check if the destination type is a struct\n\tdstStruct, ok := dst.(*types.Struct)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tsrcPlatformDependentCount := 0\n\tdstPlatformDependentCount := 0\n\n\t// count platform dependent types in the source type\n\tfor i := 0; i < srcStruct.NumFields(); i++ {\n\t\tif isPlatformDependent(srcStruct.Field(i)) {\n\t\t\tsrcPlatformDependentCount += 1\n\t\t}\n\t}\n\n\t// count platform dependent types in the destination type\n\tfor i := 0; i < dstStruct.NumFields(); i++ {\n\t\tif isPlatformDependent(dstStruct.Field(i)) {\n\t\t\tdstPlatformDependentCount += 1\n\t\t}\n\t}\n\n\t// check whether the amounts match\n\treturn srcPlatformDependentCount != dstPlatformDependentCount\n}", "func (diff DiffType) merge(other DiffType) DiffType {\n\tif diff == other {\n\t\treturn diff\n\t}\n\treturn Modified\n}", "func ThreeWay(a, b, parent types.Value, vwr types.ValueReadWriter) (merged types.Value, err error) {\n\tif a == nil && b == nil {\n\t\treturn parent, nil\n\t} else if a == nil {\n\t\treturn parent, newMergeConflict(\"Cannot merge nil Value with %s.\", b.Type().Describe())\n\t} else if b == nil {\n\t\treturn parent, newMergeConflict(\"Cannot merge %s with nil value.\", a.Type().Describe())\n\t} else if unmergeable(a, b) {\n\t\treturn parent, newMergeConflict(\"Cannot merge %s with %s.\", a.Type().Describe(), b.Type().Describe())\n\t}\n\n\treturn threeWayMerge(a, b, parent, vwr)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewRabbitMQServer returns a new RabbitMQ server with the connection and channel
func NewRabbitMQServer(username string, password string, host string) *Server { return &Server{ RabbitMQUsername: username, RabbitMQPassword: password, RabbitMQHost: host, } }
[ "func NewServer(config ConnectionConfig, logChan chan Log) (*Server, error) {\n\tif config.ReconInterval == 0 {\n\t\treturn nil, fmt.Errorf(\"reconnection interval must be above 0\")\n\t}\n\n\tsrv := &Server{\n\t\tmutex: &sync.RWMutex{},\n\t\tconfig: config,\n\t\tlogChan: logChan,\n\t\tchannels: make(map[string]*amqp.Channel),\n\t}\n\tif err := srv.connect(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn srv, nil\n}", "func New(uri string) *RabbitMQ {\n\n\tctx, cancel := context.WithCancel(context.Background())\n\trmq := &RabbitMQ{URI: uri, ConnectionContext: ctx, connectionCancelFunc: cancel, reconnected: false}\n\n\trmq.connect(uri)\n\n\t//launch a goroutine that will listen for messages on ErrorChan and try to reconnect in case of errors\n\tgo rmq.reconnector()\n\n\treturn rmq\n}", "func New(config *config.Config) (MsgBroker, error) {\n\n\t//create connection\n\tlog.Printf(\"Connecting to broker %s\", config.Broker)\n\tconn, err := amqp.Dial(config.Broker)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tch, err := conn.Channel()\n\t//create channel\n\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := &amqpBroker{*conn, *ch, make(chan *amqp.Error), config}\n\tlog.Println(\"Returning broker\")\n\treturn b, nil\n}", "func createRabbitConnection(cr *Credentials) (*Rabbit, error) {\n\tconnectionStr := fmt.Sprintf(\"amqp://%v:%v@%v:%v/\", cr.Username, cr.Password, cr.Host, cr.Port)\n\tconn, err := amqp.Dial(connectionStr)\n\tfailOnError(err, \"Failed to connect to RabbitMQ; Connection string:\"+connectionStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tch, err := conn.Channel()\n\tfailOnError(err, \"Failed to open a channel\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Rabbit{\n\t\tConnection: conn,\n\t\tChannel: ch,\n\t\tCredentials: cr,\n\t}, nil\n}", "func buildChannel() (rChan *RabbitChanWriter) {\n connection, err := amqp.Dial(*uri)\n if err != nil {\n log.Printf(\"Dial: %s\", err)\n return nil\n }\n\n channel, err := connection.Channel()\n if err != nil {\n log.Printf(\"Channel: %s\", err)\n return nil\n }\n\n // build the exchange\n if err := channel.ExchangeDeclare(\n *exchange, // name\n *exchangeType, // type\n true, // durable\n false, // auto-deleted\n false, // internal\n false, // noWait\n nil, // arguments\n ); err != nil {\n log.Fatalf(\"Exchange Declare: %s\", err)\n }\n\n // create a queue with the routing key and bind to it\n if _, err := channel.QueueDeclare(\n *routingKey, // name\n true, // durable\n false, // autoDelete\n false, // exclusive\n false, // noWait\n nil, // args\n ); err != nil {\n log.Fatalf(\"Queue Declare: %s\", err)\n }\n\n if err := channel.QueueBind(\n *routingKey, // name\n *routingKey, // key\n *exchange, // exchange\n false, // noWait\n nil, // args\n ); err != nil {\n log.Fatalf(\"Queue Bind: %s\", err)\n }\n\n\n rChan = &RabbitChanWriter{channel, connection}\n return\n}", "func NewRabbitmqClient(config *oldmonkv1.ListOptions) *RabbitmqController {\n\tconn, err := amqp.Dial(config.Uri)\n\tif err != nil {\n\t\tlogger.Error(\"unable to dial\", err)\n\t\treturn &RabbitmqController{}\n\t}\n\tch, err := conn.Channel()\n\tif err != nil {\n\t\tlogger.Error(\"unable to create channel\", err)\n\t\treturn &RabbitmqController{}\n\t}\n\trabbitmqController := RabbitmqController{\n\t\tChannel: ch,\n\t\tConfig: config,\n\t}\n\treturn &rabbitmqController\n}", "func (provider *Provider) NewConnection(server string, logger *gologger.CustomLogger) (*amqp.Connection, error) {\n\tvar connection *amqp.Connection\n\n\tvar err 
error\n\n\tuclogger = logger\n\n\tconnectDelay := 1 // 1 second\n\n\tmaxDelay := 1800 //30 minutes\n\n\turi := \"amqp://guest:guest@\" + server\n\n\tfor {\n\t\tconnection, err = amqp.Dial(uri)\n\n\t\tif err != nil {\n\t\t\tuclogger.LogError(\"error while connecting to rabbitmq\", err)\n\n\t\t\tif connectDelay < maxDelay {\n\t\t\t\tconnectDelay *= 2\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"max delay reached while trying to establish rabbitmq connection to %s\", server)\n\t\t\t}\n\n\t\t\ttime.Sleep(time.Duration(connectDelay) * time.Second)\n\t\t} else {\n\t\t\tuclogger.LogDebug(\"connection established successfully\")\n\n\t\t\treturn connection, nil\n\t\t}\n\t}\n\n}", "func NewRabbitBroker() broker.Interface {\n\treturn &consumer{}\n}", "func InitRabbitMQ() (*amqp.Connection, error) {\n\tclient, err := amqp.Dial(Configuration.RabbitMQ.Addr)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to ping connection to rabbitMQ: %s\", err.Error())\n\t}\n\n\treturn client, nil\n}", "func NewServer() *Server {}", "func OpenRabbit(_ctx *components.ComponentContext, _dbInfo *dbv1beta1.RabbitmqConnection, clientFactory RabbitMQClientFactory) (RabbitMQManager, error) {\n\turi := os.Getenv(\"RABBITMQ_URI\")\n\tinsecure := os.Getenv(\"RABBITMQ_INSECURE\")\n\n\ttransport := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: insecure != \"\",\n\t\t},\n\t}\n\n\tparsedUri, err := url.Parse(uri)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to parse RabbitMQ URL\")\n\t}\n\thostUri := url.URL{Scheme: parsedUri.Scheme, Host: parsedUri.Host}\n\trmqHost := hostUri.String()\n\trmqUser := parsedUri.User.Username()\n\trmqPass, _ := parsedUri.User.Password()\n\n\tif rmqHost == \"\" || rmqUser == \"\" || rmqPass == \"\" {\n\t\treturn nil, errors.New(\"empty rabbitmq connection credentials\")\n\t}\n\n\t// Connect to the rabbitmq cluster\n\treturn clientFactory(rmqHost, rmqUser, rmqPass, transport)\n}", "func channel(conn *amqp.Connection, msgtype string) (*amqp.Channel, error) {\n\t// Open a channel to communicate with the server\n\tchannel, err := conn.Channel()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Declare the exchange to use when publishing\n\tif err := channel.ExchangeDeclare(\n\t\tmsgtype,\n\t\t\"direct\",\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Declare the queue to use when publishing\n\tchannel.QueueDeclare(\n\t\tmsgtype,\n\t\tfalse,\n\t\ttrue,\n\t\tfalse,\n\t\tfalse,\n\t\tnil,\n\t)\n\n\t// Bind the queue to the exchange\n\tchannel.QueueBind(\n\t\tmsgtype,\n\t\t\"\",\n\t\tmsgtype,\n\t\tfalse,\n\t\tnil,\n\t)\n\n\treturn channel, nil\n}", "func newServerConnection(mc *minq.Connection, ops *connectionOperations) *Connection {\n\tif mc.Role() != minq.RoleServer {\n\t\tpanic(\"minq.Server spat out a client\")\n\t}\n\treturn newConnection(mc, ops)\n}", "func (r Rabbit) NewCh() (*amqp.Channel, error) {\n\treturn r.conn.Channel()\n}", "func NewMqttServer(sqz int, url string, retain bool, qos int) *Server {\n\treturn &Server{\n\t\tsessionQueueSize: sqz,\n\t\turl: url,\n\t\ttree: topic.NewTree(),\n\t\tretain: retain,\n\t\tqos: qos,\n\t}\n}", "func createChannel(conn *amqp.Connection) (*amqp.Channel, error) {\n\tch, err := conn.Channel()\n\tlogError(err, \"rabbitmqadapter::: Failed to open a channel\")\n\n\treturn ch, err\n}", "func NewServer(queue queue.Client) services.SteadyServer {\n\treturn &server{\n\t\tqueue: queue,\n\t}\n}", "func NewRabbitMQBackend(name string, config 
RabbitMQConfig) (*RabbitMQBackend, error) {\n\tif config.Address == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing address key\")\n\t}\n\n\t// create the rabbitmq http client\n\tclient, err := rabbit.NewClient(config.Address, config.Username, config.Password)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure we can connect to the backend and that it works\n\twhoami, permErr := client.Whoami()\n\tif permErr != nil {\n\t\tlog.Errorf(\"Sad: %+v\", permErr)\n\t\treturn nil, permErr\n\t}\n\n\tif whoami.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"Possible authentication issue, could not get information about my own account\")\n\t}\n\n\tbackend := &RabbitMQBackend{}\n\tbackend.Name = name\n\tbackend.Config = config\n\tbackend.Connection = client\n\n\treturn backend, nil\n}", "func NewRabbitMQ() *RabbitMQ {\n\treturn &RabbitMQ{}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defang takes an IOC and defangs it using the standard defangReplacements
func (ioc *IOC) Defang() *IOC { copy := *ioc ioc = &copy // Just do a string replace on each if replacements, ok := defangReplacements[ioc.Type]; ok { for _, fangPair := range replacements { ioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.fanged, fangPair.defanged) } } return ioc }
[ "func (ioc *IOC) Fang() *IOC {\n\tcopy := *ioc\n\tioc = &copy\n\n\t// String replace all defangs in our standard set\n\tif replacements, ok := defangReplacements[ioc.Type]; ok {\n\t\tfor _, fangPair := range replacements {\n\t\t\tioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.defanged, fangPair.fanged)\n\t\t}\n\t}\n\n\t// Regex replace everything from the fang replacements\n\tif replacements, ok := fangReplacements[ioc.Type]; ok {\n\t\tfor _, regexReplacement := range replacements {\n\t\t\t// Offset is incase we shrink the string and need to offset locations\n\t\t\toffset := 0\n\n\t\t\t// Get indexes of replacements and replace them\n\t\t\ttoReplace := regexReplacement.pattern.FindAllStringIndex(ioc.IOC, -1)\n\t\t\tfor _, location := range toReplace {\n\t\t\t\t// Update this found string\n\t\t\t\tstartSize := len(ioc.IOC)\n\t\t\t\tioc.IOC = ioc.IOC[0:location[0]-offset] + regexReplacement.replace + ioc.IOC[location[1]-offset:len(ioc.IOC)]\n\t\t\t\t// Update offset with how much the string shrunk (or grew)\n\t\t\t\toffset += startSize - len(ioc.IOC)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ioc\n}", "func goConvInvoc(a ClientArg) string {\n\tjsonConvTmpl := `\nvar {{.GoArg}} {{.GoType}}\nif {{.FlagArg}} != nil && len(*{{.FlagArg}}) > 0 {\n\terr = json.Unmarshal([]byte(*{{.FlagArg}}), &{{.GoArg}})\n\tif err != nil {\n\t\tpanic(errors.Wrapf(err, \"unmarshalling {{.GoArg}} from %v:\", {{.FlagArg}}))\n\t}\n}\n`\n\tif a.Repeated || !a.IsBaseType {\n\t\tcode, err := applyTemplate(\"UnmarshalCliArgs\", jsonConvTmpl, a, nil)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Couldn't apply template: %v\", err))\n\t\t}\n\t\treturn code\n\t}\n\treturn fmt.Sprintf(`%s := %s`, a.GoArg, flagTypeConversion(a))\n}", "func folSubstitute(f FolFormula, v Variable, o Object) FolFormula {\n\treturn createFolFormula(Tokenize(\n\t\tstrings.Replace(f.Pddl(), v.name, o.name, -1)))\n}", "func enableDefaultInjection(ns namespace.Instance) error {\n\tvar errs *multierror.Error\n\terrs = multierror.Append(errs, ns.SetLabel(\"istio-injection\", \"enabled\"))\n\terrs = multierror.Append(errs, ns.RemoveLabel(\"istio.io/rev\"))\n\treturn errs.ErrorOrNil()\n}", "func Defun(opname string, funcBody Mapper, quoter Mapper, env *Environment) {\n\topsym := GlobalEnvironment.Intern(opname, false)\n\topsym.Value = Atomize(&internalOp{sym: opsym, caller: funcBody, quoter: quoter})\n\tT().Debugf(\"new interal op %s = %v\", opsym.Name, opsym.Value)\n}", "func (h *Handler) sidecarInjection(namespace string, del bool) error {\n\tkclient := h.KubeClient\n\tif kclient == nil {\n\t\treturn ErrNilClient\n\t}\n\n\t// updating the label on the namespace\n\tns, err := kclient.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ns.ObjectMeta.Labels == nil {\n\t\tns.ObjectMeta.Labels = map[string]string{}\n\t}\n\tns.ObjectMeta.Labels[\"openservicemesh.io/monitored-by\"] = \"osm\"\n\n\tif del {\n\t\tdelete(ns.ObjectMeta.Labels, \"openservicemesh.io/monitored-by\")\n\t}\n\n\t// updating the annotations on the namespace\n\tif ns.ObjectMeta.Annotations == nil {\n\t\tns.ObjectMeta.Annotations = map[string]string{}\n\t}\n\tns.ObjectMeta.Annotations[\"openservicemesh.io/sidecar-injection\"] = \"enabled\"\n\n\tif del {\n\t\tdelete(ns.ObjectMeta.Annotations, \"openservicemesh.io/sidecar-injection\")\n\t}\n\n\tfmt.Println(ns.ObjectMeta)\n\n\t_, err = kclient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func 
render(template string, def definition, params map[string]interface{}) (string, error) {\n\tctx := plush.NewContext()\n\tctx.Set(\"camelize_down\", camelizeDown)\n\tctx.Set(\"def\", def)\n\tctx.Set(\"params\", params)\n\ts, err := plush.Render(string(template), ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn s, nil\n}", "func (h *Handler) sidecarInjection(del bool, version, ns string) error {\n\texe, err := h.getExecutable(version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinjectCmd := \"add\"\n\tif del {\n\t\tinjectCmd = \"remove\"\n\t}\n\n\tcmd := &exec.Cmd{\n\t\tPath: exe,\n\t\tArgs: []string{\n\t\t\texe,\n\t\t\t\"namespace\",\n\t\t\tinjectCmd,\n\t\t\tns,\n\t\t},\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stdout,\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn ErrRunExecutable(err)\n\t}\n\n\treturn nil\n}", "func init() {\n\timports.Packages[\"github.com/cosmos72/gomacro/base\"] = imports.Package{\n\t\tBinds: map[string]r.Value{\n\t\t\t\"CMacroExpand\": r.ValueOf(CMacroExpand),\n\t\t\t\"CMacroExpand1\": r.ValueOf(CMacroExpand1),\n\t\t\t\"CMacroExpandCodewalk\": r.ValueOf(CMacroExpandCodewalk),\n\t\t\t\"CmdOptForceEval\": r.ValueOf(CmdOptForceEval),\n\t\t\t\"CmdOptQuit\": r.ValueOf(CmdOptQuit),\n\t\t\t\"CollectNestedUnquotes\": r.ValueOf(CollectNestedUnquotes),\n\t\t\t\"DescendNestedUnquotes\": r.ValueOf(DescendNestedUnquotes),\n\t\t\t\"DuplicateNestedUnquotes\": r.ValueOf(DuplicateNestedUnquotes),\n\t\t\t\"False\": r.ValueOf(&False).Elem(),\n\t\t\t\"IsGensym\": r.ValueOf(IsGensym),\n\t\t\t\"IsGensymAnonymous\": r.ValueOf(IsGensymAnonymous),\n\t\t\t\"IsGensymInterface\": r.ValueOf(IsGensymInterface),\n\t\t\t\"IsGensymPrivate\": r.ValueOf(IsGensymPrivate),\n\t\t\t\"MakeBufReadline\": r.ValueOf(MakeBufReadline),\n\t\t\t\"MakeNestedQuote\": r.ValueOf(MakeNestedQuote),\n\t\t\t\"MakeQuote\": r.ValueOf(MakeQuote),\n\t\t\t\"MakeQuote2\": r.ValueOf(MakeQuote2),\n\t\t\t\"MakeTtyReadline\": r.ValueOf(MakeTtyReadline),\n\t\t\t\"MaxInt\": r.ValueOf(MaxInt),\n\t\t\t\"MaxUint\": r.ValueOf(MaxUint),\n\t\t\t\"MaxUint16\": r.ValueOf(MaxUint16),\n\t\t\t\"MinInt\": r.ValueOf(MinInt),\n\t\t\t\"NewGlobals\": r.ValueOf(NewGlobals),\n\t\t\t\"Nil\": r.ValueOf(&Nil).Elem(),\n\t\t\t\"None\": r.ValueOf(&None).Elem(),\n\t\t\t\"One\": r.ValueOf(&One).Elem(),\n\t\t\t\"OptCollectDeclarations\": r.ValueOf(OptCollectDeclarations),\n\t\t\t\"OptCollectStatements\": r.ValueOf(OptCollectStatements),\n\t\t\t\"OptCtrlCEnterDebugger\": r.ValueOf(OptCtrlCEnterDebugger),\n\t\t\t\"OptDebugCallStack\": r.ValueOf(OptDebugCallStack),\n\t\t\t\"OptDebugDebugger\": r.ValueOf(OptDebugDebugger),\n\t\t\t\"OptDebugField\": r.ValueOf(OptDebugField),\n\t\t\t\"OptDebugFromReflect\": r.ValueOf(OptDebugFromReflect),\n\t\t\t\"OptDebugGenerics\": r.ValueOf(OptDebugGenerics),\n\t\t\t\"OptDebugMacroExpand\": r.ValueOf(OptDebugMacroExpand),\n\t\t\t\"OptDebugMethod\": r.ValueOf(OptDebugMethod),\n\t\t\t\"OptDebugParse\": r.ValueOf(OptDebugParse),\n\t\t\t\"OptDebugQuasiquote\": r.ValueOf(OptDebugQuasiquote),\n\t\t\t\"OptDebugRecover\": r.ValueOf(OptDebugRecover),\n\t\t\t\"OptDebugSleepOnSwitch\": r.ValueOf(OptDebugSleepOnSwitch),\n\t\t\t\"OptDebugger\": r.ValueOf(OptDebugger),\n\t\t\t\"OptKeepUntyped\": r.ValueOf(OptKeepUntyped),\n\t\t\t\"OptMacroExpandOnly\": r.ValueOf(OptMacroExpandOnly),\n\t\t\t\"OptPanicStackTrace\": r.ValueOf(OptPanicStackTrace),\n\t\t\t\"OptShowCompile\": r.ValueOf(OptShowCompile),\n\t\t\t\"OptShowEval\": r.ValueOf(OptShowEval),\n\t\t\t\"OptShowEvalType\": 
r.ValueOf(OptShowEvalType),\n\t\t\t\"OptShowMacroExpand\": r.ValueOf(OptShowMacroExpand),\n\t\t\t\"OptShowParse\": r.ValueOf(OptShowParse),\n\t\t\t\"OptShowPrompt\": r.ValueOf(OptShowPrompt),\n\t\t\t\"OptShowTime\": r.ValueOf(OptShowTime),\n\t\t\t\"OptTrapPanic\": r.ValueOf(OptTrapPanic),\n\t\t\t\"ParseOptions\": r.ValueOf(ParseOptions),\n\t\t\t\"ReadBytes\": r.ValueOf(ReadBytes),\n\t\t\t\"ReadMultiline\": r.ValueOf(ReadMultiline),\n\t\t\t\"ReadOptCollectAllComments\": r.ValueOf(ReadOptCollectAllComments),\n\t\t\t\"ReadOptShowPrompt\": r.ValueOf(ReadOptShowPrompt),\n\t\t\t\"ReadString\": r.ValueOf(ReadString),\n\t\t\t\"SigAll\": r.ValueOf(SigAll),\n\t\t\t\"SigDebug\": r.ValueOf(SigDebug),\n\t\t\t\"SigDefer\": r.ValueOf(SigDefer),\n\t\t\t\"SigInterrupt\": r.ValueOf(SigInterrupt),\n\t\t\t\"SigNone\": r.ValueOf(SigNone),\n\t\t\t\"SigReturn\": r.ValueOf(SigReturn),\n\t\t\t\"SimplifyAstForQuote\": r.ValueOf(SimplifyAstForQuote),\n\t\t\t\"SimplifyNodeForQuote\": r.ValueOf(SimplifyNodeForQuote),\n\t\t\t\"StartSignalHandler\": r.ValueOf(StartSignalHandler),\n\t\t\t\"StopSignalHandler\": r.ValueOf(StopSignalHandler),\n\t\t\t\"StrGensym\": r.ValueOf(StrGensym),\n\t\t\t\"StrGensymAnonymous\": r.ValueOf(StrGensymAnonymous),\n\t\t\t\"StrGensymInterface\": r.ValueOf(StrGensymInterface),\n\t\t\t\"StrGensymPrivate\": r.ValueOf(StrGensymPrivate),\n\t\t\t\"True\": r.ValueOf(&True).Elem(),\n\t\t\t\"TypeOfBool\": r.ValueOf(&TypeOfBool).Elem(),\n\t\t\t\"TypeOfByte\": r.ValueOf(&TypeOfByte).Elem(),\n\t\t\t\"TypeOfComplex128\": r.ValueOf(&TypeOfComplex128).Elem(),\n\t\t\t\"TypeOfComplex64\": r.ValueOf(&TypeOfComplex64).Elem(),\n\t\t\t\"TypeOfDeferFunc\": r.ValueOf(&TypeOfDeferFunc).Elem(),\n\t\t\t\"TypeOfError\": r.ValueOf(&TypeOfError).Elem(),\n\t\t\t\"TypeOfFloat32\": r.ValueOf(&TypeOfFloat32).Elem(),\n\t\t\t\"TypeOfFloat64\": r.ValueOf(&TypeOfFloat64).Elem(),\n\t\t\t\"TypeOfInt\": r.ValueOf(&TypeOfInt).Elem(),\n\t\t\t\"TypeOfInt16\": r.ValueOf(&TypeOfInt16).Elem(),\n\t\t\t\"TypeOfInt32\": r.ValueOf(&TypeOfInt32).Elem(),\n\t\t\t\"TypeOfInt64\": r.ValueOf(&TypeOfInt64).Elem(),\n\t\t\t\"TypeOfInt8\": r.ValueOf(&TypeOfInt8).Elem(),\n\t\t\t\"TypeOfInterface\": r.ValueOf(&TypeOfInterface).Elem(),\n\t\t\t\"TypeOfPtrBool\": r.ValueOf(&TypeOfPtrBool).Elem(),\n\t\t\t\"TypeOfPtrComplex128\": r.ValueOf(&TypeOfPtrComplex128).Elem(),\n\t\t\t\"TypeOfPtrComplex64\": r.ValueOf(&TypeOfPtrComplex64).Elem(),\n\t\t\t\"TypeOfPtrFloat32\": r.ValueOf(&TypeOfPtrFloat32).Elem(),\n\t\t\t\"TypeOfPtrFloat64\": r.ValueOf(&TypeOfPtrFloat64).Elem(),\n\t\t\t\"TypeOfPtrInt\": r.ValueOf(&TypeOfPtrInt).Elem(),\n\t\t\t\"TypeOfPtrInt16\": r.ValueOf(&TypeOfPtrInt16).Elem(),\n\t\t\t\"TypeOfPtrInt32\": r.ValueOf(&TypeOfPtrInt32).Elem(),\n\t\t\t\"TypeOfPtrInt64\": r.ValueOf(&TypeOfPtrInt64).Elem(),\n\t\t\t\"TypeOfPtrInt8\": r.ValueOf(&TypeOfPtrInt8).Elem(),\n\t\t\t\"TypeOfPtrString\": r.ValueOf(&TypeOfPtrString).Elem(),\n\t\t\t\"TypeOfPtrUint\": r.ValueOf(&TypeOfPtrUint).Elem(),\n\t\t\t\"TypeOfPtrUint16\": r.ValueOf(&TypeOfPtrUint16).Elem(),\n\t\t\t\"TypeOfPtrUint32\": r.ValueOf(&TypeOfPtrUint32).Elem(),\n\t\t\t\"TypeOfPtrUint64\": r.ValueOf(&TypeOfPtrUint64).Elem(),\n\t\t\t\"TypeOfPtrUint8\": r.ValueOf(&TypeOfPtrUint8).Elem(),\n\t\t\t\"TypeOfPtrUintptr\": r.ValueOf(&TypeOfPtrUintptr).Elem(),\n\t\t\t\"TypeOfReflectType\": r.ValueOf(&TypeOfReflectType).Elem(),\n\t\t\t\"TypeOfRune\": r.ValueOf(&TypeOfRune).Elem(),\n\t\t\t\"TypeOfString\": r.ValueOf(&TypeOfString).Elem(),\n\t\t\t\"TypeOfUint\": r.ValueOf(&TypeOfUint).Elem(),\n\t\t\t\"TypeOfUint16\": 
r.ValueOf(&TypeOfUint16).Elem(),\n\t\t\t\"TypeOfUint32\": r.ValueOf(&TypeOfUint32).Elem(),\n\t\t\t\"TypeOfUint64\": r.ValueOf(&TypeOfUint64).Elem(),\n\t\t\t\"TypeOfUint8\": r.ValueOf(&TypeOfUint8).Elem(),\n\t\t\t\"TypeOfUintptr\": r.ValueOf(&TypeOfUintptr).Elem(),\n\t\t\t\"UnwrapTrivialAst\": r.ValueOf(UnwrapTrivialAst),\n\t\t\t\"UnwrapTrivialAstKeepBlocks\": r.ValueOf(UnwrapTrivialAstKeepBlocks),\n\t\t\t\"UnwrapTrivialNode\": r.ValueOf(UnwrapTrivialNode),\n\t\t\t\"ZeroStrings\": r.ValueOf(&ZeroStrings).Elem(),\n\t\t\t\"ZeroTypes\": r.ValueOf(&ZeroTypes).Elem(),\n\t\t\t\"ZeroValues\": r.ValueOf(&ZeroValues).Elem(),\n\t\t}, Types: map[string]r.Type{\n\t\t\t\"BufReadline\": r.TypeOf((*BufReadline)(nil)).Elem(),\n\t\t\t\"CmdOpt\": r.TypeOf((*CmdOpt)(nil)).Elem(),\n\t\t\t\"Globals\": r.TypeOf((*Globals)(nil)).Elem(),\n\t\t\t\"Inspector\": r.TypeOf((*Inspector)(nil)).Elem(),\n\t\t\t\"Options\": r.TypeOf((*Options)(nil)).Elem(),\n\t\t\t\"Output\": r.TypeOf((*Output)(nil)).Elem(),\n\t\t\t\"ReadOptions\": r.TypeOf((*ReadOptions)(nil)).Elem(),\n\t\t\t\"Readline\": r.TypeOf((*Readline)(nil)).Elem(),\n\t\t\t\"Signal\": r.TypeOf((*Signal)(nil)).Elem(),\n\t\t\t\"Signals\": r.TypeOf((*Signals)(nil)).Elem(),\n\t\t\t\"TtyReadline\": r.TypeOf((*TtyReadline)(nil)).Elem(),\n\t\t\t\"WhichMacroExpand\": r.TypeOf((*WhichMacroExpand)(nil)).Elem(),\n\t\t}, Proxies: map[string]r.Type{\n\t\t\t\"Inspector\": r.TypeOf((*P_github_com_cosmos72_gomacro_base_Inspector)(nil)).Elem(),\n\t\t\t\"Readline\": r.TypeOf((*P_github_com_cosmos72_gomacro_base_Readline)(nil)).Elem(),\n\t\t}, Untypeds: map[string]string{\n\t\t\t\"CmdOptForceEval\": \"int:2\",\n\t\t\t\"CmdOptQuit\": \"int:1\",\n\t\t}, Wrappers: map[string][]string{\n\t\t\t\"Globals\": []string{\"Copy\", \"Debugf\", \"Error\", \"ErrorAt\", \"Errorf\", \"Fprintf\", \"IncLine\", \"IncLineBytes\", \"MakeRuntimeError\", \"Position\", \"Sprintf\", \"ToString\", \"WarnExtraValues\", \"Warnf\"},\n\t\t\t\"Output\": []string{\"Copy\", \"ErrorAt\", \"Errorf\", \"Fprintf\", \"IncLine\", \"IncLineBytes\", \"MakeRuntimeError\", \"Position\", \"Sprintf\", \"ToString\"},\n\t\t},\n\t}\n}", "func AutoInject(v interface{}) injectionWish {\n\tt, isType := v.(reflect.Type)\n\tif !isType {\n\t\tt = reflect.TypeOf(v)\n\t}\n\treturn injectionWish(GetTypeCode(t))\n}", "func setupSubst(objType string, search string, replace string) {\n\tif !globalType[objType] {\n\t\tabort.Msg(\"Unknown type %s\", objType)\n\t}\n\taddSubst := func(objType, search, replace string) {\n\t\tsubMap, ok := subst[objType]\n\t\tif !ok {\n\t\t\tsubMap = make(map[string]string)\n\t\t\tsubst[objType] = subMap\n\t\t}\n\t\tsubMap[search] = replace\n\t}\n\n\taddSubst(objType, search, replace)\n\n\tfor _, other := range aliases[objType] {\n\t\taddSubst(other, search, replace)\n\t}\n}", "func istioUninject(args []string, opts *options.Options) error {\n\tglooNS := opts.Metadata.Namespace\n\n\tclient := helpers.MustKubeClient()\n\t_, err := client.CoreV1().Namespaces().Get(opts.Top.Ctx, glooNS, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Remove gateway_proxy_sds cluster from the gateway-proxy configmap\n\tconfigMaps, err := client.CoreV1().ConfigMaps(glooNS).List(opts.Top.Ctx, metav1.ListOptions{})\n\tfor _, configMap := range configMaps.Items {\n\t\tif configMap.Name == gatewayProxyConfigMap {\n\t\t\t// Make sure we don't already have the gateway_proxy_sds cluster set up\n\t\t\terr := removeSdsCluster(&configMap)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = 
client.CoreV1().ConfigMaps(glooNS).Update(opts.Top.Ctx, &configMap, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tdeployments, err := client.AppsV1().Deployments(glooNS).List(opts.Top.Ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, deployment := range deployments.Items {\n\t\tif deployment.Name == \"gateway-proxy\" {\n\t\t\tcontainers := deployment.Spec.Template.Spec.Containers\n\n\t\t\t// Remove Sidecars\n\t\t\tsdsPresent := false\n\t\t\tistioPresent := false\n\t\t\tif len(containers) > 1 {\n\t\t\t\tfor i := len(containers) - 1; i >= 0; i-- {\n\t\t\t\t\tcontainer := containers[i]\n\t\t\t\t\tif container.Name == \"sds\" {\n\t\t\t\t\t\tsdsPresent = true\n\t\t\t\t\t\tcopy(containers[i:], containers[i+1:])\n\t\t\t\t\t\tcontainers = containers[:len(containers)-1]\n\t\t\t\t\t}\n\t\t\t\t\tif container.Name == \"istio-proxy\" {\n\t\t\t\t\t\tistioPresent = true\n\n\t\t\t\t\t\tcopy(containers[i:], containers[i+1:])\n\t\t\t\t\t\tcontainers = containers[:len(containers)-1]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !sdsPresent || !istioPresent {\n\t\t\t\treturn ErrMissingSidecars\n\t\t\t}\n\n\t\t\tdeployment.Spec.Template.Spec.Containers = containers\n\n\t\t\tremoveIstioVolumes(&deployment)\n\t\t\t_, err = client.AppsV1().Deployments(glooNS).Update(opts.Top.Ctx, &deployment, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn nil\n}", "func revertInitialisms(s string) string {\n\tfor i := 0; i < len(commonInitialisms); i++ {\n\t\ts = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])\n\t}\n\treturn s\n}", "func (view *View) funcReplace(search, replace, str interface{}) string {\n\treturn gstr.Replace(gconv.String(str), gconv.String(search), gconv.String(replace), -1)\n}", "func preprocessString(alias *Alias, str string) (string, error) {\n\t// Load Remote/Local alias definitions\n\tif externalDefinitionErr := alias.loadExternalAlias(); externalDefinitionErr != nil {\n\t\treturn \"\", externalDefinitionErr\n\t}\n\n\talias.loadGlobalAlias()\n\n\t// Validate alias definitions\n\tif improperFormatErr := alias.resolveMapAndValidate(); improperFormatErr != nil {\n\t\treturn \"\", improperFormatErr\n\t}\n\n\tvar out strings.Builder\n\tvar command strings.Builder\n\tongoingCmd := false\n\n\t// Search and replace all strings with the directive\n\t// (sam) we add a placeholder space at the end of the string below\n\t// to force the state machine to END. 
We remove it before returning\n\t// the result to user\n\tfor _, char := range str + \" \" {\n\t\tif ongoingCmd {\n\t\t\tif char == alias.directive && command.Len() == 0 { // Escape Character Triggered\n\t\t\t\tout.WriteRune(alias.directive)\n\t\t\t\tongoingCmd = false\n\t\t\t} else if !isAlphanumeric(char) { // Delineates the end of an alias\n\t\t\t\tresolvedCommand, commandPresent := alias.AliasMap[command.String()]\n\t\t\t\t// If command is not found we assume this to be the expect item itself.\n\t\t\t\tif !commandPresent {\n\t\t\t\t\tout.WriteString(string(alias.directive) + command.String() + string(char))\n\t\t\t\t\tongoingCmd = false\n\t\t\t\t\tcommand.Reset()\n\t\t\t\t} else {\n\t\t\t\t\tout.WriteString(resolvedCommand)\n\t\t\t\t\tif char != alias.directive {\n\t\t\t\t\t\tongoingCmd = false\n\t\t\t\t\t\tout.WriteRune(char)\n\t\t\t\t\t}\n\t\t\t\t\tcommand.Reset()\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tcommand.WriteRune(char)\n\t\t\t}\n\t\t} else if char == alias.directive {\n\t\t\tongoingCmd = true\n\t\t} else {\n\t\t\tout.WriteRune(char)\n\t\t}\n\t}\n\n\treturn strings.TrimSuffix(out.String(), \" \"), nil\n}", "func CalicoctlReplace(yaml string, args ...interface{}) {\n\tcmd := exec.Command(\"calicoctl\", \"replace\", \"-f\", \"-\")\n\tcalicoctlCmdWithFile(cmd, yaml, args...)\n}", "func replaceImports(content string, imports []string) string {\n\t// make sure that deeper imports will be replaced first\n\t// it is required for correct processing of nested packages\n\tsort.Sort(sort.Reverse(sort.StringSlice(imports)))\n\tfor i, imp := range imports {\n\t\tcontent = strings.Replace(content, imp, genPackageAlias(i), -1)\n\t}\n\treturn content\n}", "func render(template string, def definition, params map[string]interface{}) (string, error) {\n\tctx := plush.NewContext()\n\tctx.Set(\"camelize_down\", camelizeDown)\n\tctx.Set(\"underscore\", underscore)\n\tctx.Set(\"def\", def)\n\tctx.Set(\"params\", params)\n\tctx.Set(\"rust_type\", rustType)\n\tctx.Set(\"rust_default\", rustDefault)\n\tctx.Set(\"has\", has)\n\ts, err := plush.Render(string(template), ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn s, nil\n}", "func GenNgService(db *gorm.DB) {\n\n\t// create the list of structs\n\tvar structs []models.Struct\n\tdb.Find(&structs)\n\n\t// generates one detail compenent per struct\n\tfor _, _struct := range structs {\n\n\t\tstructName := strings.ToLower(_struct.Name)\n\n\t\tvar fields models.Fields\n\n\t\t// fetch all association fields worthy of a assocation path\n\t\t// ie. 
where the AssociatedStructID is the struct of interest\n\n\t\tcolumnName := gorm.ToColumnName(\"AssociatedStructID\")\n\t\t// log.Output(0, fmt.Sprintf(\"Column name: %s\", columnName))\n\t\tqueryAssoc := db.Where(fmt.Sprintf(\"%s = ?\", columnName), _struct.ID).Find(&fields)\n\t\tif queryAssoc.Error != nil {\n\t\t\tlog.Fatal(queryAssoc.Error.Error())\n\t\t}\n\n\t\t// question : do you like variables with long name ?\n\t\t// answer : when the problem is complex ( elts to deal with)\n\t\tImportsOfStructWithPointerFieldToCurrentStruct := \"\"\n\t\tGettersOfStructWithPointerFieldToCurrentStruct := \"\"\n\n\t\t// TODO when multiple field have the same association struct\n\t\tassocStructMatch := make(map[uint]bool)\n\t\tfor _, field := range fields {\n\n\t\t\tif field.Kind == reflect.Ptr {\n\t\t\t\t// fetch the assoc struct\n\t\t\t\tvar assocStruct models.Struct\n\t\t\t\tdb.First(&assocStruct, field.StructID)\n\t\t\t\tlowerCaseAssocStructName := strings.ToLower(assocStruct.Name)\n\t\t\t\tlowerCaseFieldname := strings.ToLower(field.Name)\n\n\t\t\t\t// avoid multiple imports\n\t\t\t\t_, ok := assocStructMatch[field.StructID]\n\t\t\t\tif !ok && (assocStruct.ID != _struct.ID) {\n\t\t\t\t\tassocStructMatch[field.StructID] = true\n\n\t\t\t\t\timportStringTS := ngTemplateForImportOfStructWithPointerFieldToCurrentStruct\n\n\t\t\t\t\t// for imports\n\t\t\t\t\timportStringTS = strings.ReplaceAll(importStringTS, \"{{AssocStructName}}\", assocStruct.Name)\n\t\t\t\t\timportStringTS = strings.ReplaceAll(importStringTS, \"{{assocStructName}}\", lowerCaseAssocStructName)\n\t\t\t\t\tImportsOfStructWithPointerFieldToCurrentStruct += importStringTS\n\t\t\t\t}\n\t\t\t\tgetterStringTS := ngTemplateForViaGetterOfStructWithPointerFieldToCurrentStruct\n\n\t\t\t\t// for getters\n\t\t\t\tgetterStringTS = strings.ReplaceAll(getterStringTS, \"{{AssocStructName}}\", assocStruct.Name)\n\t\t\t\tgetterStringTS = strings.ReplaceAll(getterStringTS, \"{{assocStructName}}\", lowerCaseAssocStructName)\n\t\t\t\tgetterStringTS = strings.ReplaceAll(getterStringTS, \"{{Fieldname}}\", field.Name)\n\t\t\t\tgetterStringTS = strings.ReplaceAll(getterStringTS, \"{{fieldname}}\", lowerCaseFieldname)\n\t\t\t\tGettersOfStructWithPointerFieldToCurrentStruct += getterStringTS\n\t\t\t}\n\n\t\t}\n\n\t\tstringTS := templateNgService\n\n\t\tstringTS = strings.ReplaceAll(stringTS,\n\t\t\t\"{{ImportsOfStructWithPointerFieldToCurrentStruct}}\",\n\t\t\tImportsOfStructWithPointerFieldToCurrentStruct)\n\t\tstringTS = strings.ReplaceAll(stringTS,\n\t\t\t\"{{GettersOfStructWithPointerFieldToCurrentStruct}}\",\n\t\t\tGettersOfStructWithPointerFieldToCurrentStruct)\n\t\tstringTS = strings.ReplaceAll(stringTS, \"{{Structname}}\", _struct.Name)\n\t\tstringTS = strings.ReplaceAll(stringTS, \"{{structname}}\", structName)\n\t\tstringTS = strings.ReplaceAll(stringTS, \"{{addr}}\", ADDR)\n\t\tfile := createSingleFileInNgTargetPath(fmt.Sprintf(\"%s.service.ts\", structName))\n\t\tdefer file.Close()\n\t\tfmt.Fprint(file, stringTS)\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fang takes an IOC and removes the defanging from it (converts it to a fanged IOC).
func (ioc *IOC) Fang() *IOC {
	copy := *ioc
	ioc = &copy

	// String replace all defangs in our standard set
	if replacements, ok := defangReplacements[ioc.Type]; ok {
		for _, fangPair := range replacements {
			ioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.defanged, fangPair.fanged)
		}
	}

	// Regex replace everything from the fang replacements
	if replacements, ok := fangReplacements[ioc.Type]; ok {
		for _, regexReplacement := range replacements {
			// Offset is in case we shrink the string and need to offset locations
			offset := 0

			// Get indexes of replacements and replace them
			toReplace := regexReplacement.pattern.FindAllStringIndex(ioc.IOC, -1)
			for _, location := range toReplace {
				// Update this found string
				startSize := len(ioc.IOC)
				ioc.IOC = ioc.IOC[0:location[0]-offset] + regexReplacement.replace + ioc.IOC[location[1]-offset:len(ioc.IOC)]
				// Update offset with how much the string shrunk (or grew)
				offset += startSize - len(ioc.IOC)
			}
		}
	}

	return ioc
}
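A minimal usage sketch for the function above. The URL type constant and the contents of the replacement tables are assumptions for illustration; only the IOC struct and the Fang method come from the snippet itself.

// Hypothetical caller: assumes a URL constant alongside Bitcoin, MD5, etc.,
// and that defangReplacements/fangReplacements cover the usual URL patterns.
func exampleFang() {
	defanged := &IOC{IOC: "hxxps://example[.]com/payload", Type: URL}
	fanged := defanged.Fang()
	// With typical replacement tables this prints https://example.com/payload;
	// the exact output depends on the configured fang pairs.
	fmt.Println(fanged.IOC)
}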
[ "func (ioc *IOC) Defang() *IOC {\n\tcopy := *ioc\n\tioc = &copy\n\n\t// Just do a string replace on each\n\tif replacements, ok := defangReplacements[ioc.Type]; ok {\n\t\tfor _, fangPair := range replacements {\n\t\t\tioc.IOC = strings.ReplaceAll(ioc.IOC, fangPair.fanged, fangPair.defanged)\n\t\t}\n\t}\n\n\treturn ioc\n}", "func istioUninject(args []string, opts *options.Options) error {\n\tglooNS := opts.Metadata.Namespace\n\n\tclient := helpers.MustKubeClient()\n\t_, err := client.CoreV1().Namespaces().Get(opts.Top.Ctx, glooNS, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Remove gateway_proxy_sds cluster from the gateway-proxy configmap\n\tconfigMaps, err := client.CoreV1().ConfigMaps(glooNS).List(opts.Top.Ctx, metav1.ListOptions{})\n\tfor _, configMap := range configMaps.Items {\n\t\tif configMap.Name == gatewayProxyConfigMap {\n\t\t\t// Make sure we don't already have the gateway_proxy_sds cluster set up\n\t\t\terr := removeSdsCluster(&configMap)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = client.CoreV1().ConfigMaps(glooNS).Update(opts.Top.Ctx, &configMap, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tdeployments, err := client.AppsV1().Deployments(glooNS).List(opts.Top.Ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, deployment := range deployments.Items {\n\t\tif deployment.Name == \"gateway-proxy\" {\n\t\t\tcontainers := deployment.Spec.Template.Spec.Containers\n\n\t\t\t// Remove Sidecars\n\t\t\tsdsPresent := false\n\t\t\tistioPresent := false\n\t\t\tif len(containers) > 1 {\n\t\t\t\tfor i := len(containers) - 1; i >= 0; i-- {\n\t\t\t\t\tcontainer := containers[i]\n\t\t\t\t\tif container.Name == \"sds\" {\n\t\t\t\t\t\tsdsPresent = true\n\t\t\t\t\t\tcopy(containers[i:], containers[i+1:])\n\t\t\t\t\t\tcontainers = containers[:len(containers)-1]\n\t\t\t\t\t}\n\t\t\t\t\tif container.Name == \"istio-proxy\" {\n\t\t\t\t\t\tistioPresent = true\n\n\t\t\t\t\t\tcopy(containers[i:], containers[i+1:])\n\t\t\t\t\t\tcontainers = containers[:len(containers)-1]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !sdsPresent || !istioPresent {\n\t\t\t\treturn ErrMissingSidecars\n\t\t\t}\n\n\t\t\tdeployment.Spec.Template.Spec.Containers = containers\n\n\t\t\tremoveIstioVolumes(&deployment)\n\t\t\t_, err = client.AppsV1().Deployments(glooNS).Update(opts.Top.Ctx, &deployment, metav1.UpdateOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn nil\n}", "func AutoUnpatch(f func()) {\n\tdefer popPatchStack()\n\taddPatchStack()\n\tf()\n}", "func clean(isa *isabellebot.Bot) {\n\tisa.Service.Event.Clean()\n\tisa.Service.Trade.Clean()\n\tisa.Service.User.Clean()\n}", "func ClearRefactorings() {\n\trefactorings = map[string]refactoring.Refactoring{}\n\trefactoringsInOrder = []string{}\n}", "func (self *dependencies) remove(definition string) {\n for index, dependency := range *self {\n if dependency == definition {\n (*self) = append((*self)[:index], (*self)[index+1:]...)\n }\n }\n}", "func (b *Bzr) Clean(d *Dependency) {\n\treturn\n}", "func (rign *CFGoReadIgnore) Clean() {\n}", "func Untag(cfg Config) error {\n\ts, err := Read(cfg.Path())\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, arg := range cfg.Tags() {\n\t\tname, classifier, hasClassifier := nameAndClassifier(arg)\n\t\tif hasClassifier {\n\t\t\ts.DelClassifier(name, classifier)\n\t\t} else {\n\t\t\ts.Del(name)\n\t\t}\n\t}\n\n\treturn Write(cfg.Path(), s)\n}", "func elimDeadAutosGeneric(f *Func) 
{\n\taddr := make(map[*Value]GCNode) // values that the address of the auto reaches\n\telim := make(map[*Value]GCNode) // values that could be eliminated if the auto is\n\tused := make(map[GCNode]bool) // used autos that must be kept\n\n\t// visit the value and report whether any of the maps are updated\n\tvisit := func(v *Value) (changed bool) {\n\t\targs := v.Args\n\t\tswitch v.Op {\n\t\tcase OpAddr, OpLocalAddr:\n\t\t\t// Propagate the address if it points to an auto.\n\t\t\tn, ok := v.Aux.(GCNode)\n\t\t\tif !ok || n.StorageClass() != ClassAuto {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif addr[v] == nil {\n\t\t\t\taddr[v] = n\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t\treturn\n\t\tcase OpVarDef, OpVarKill:\n\t\t\t// v should be eliminated if we eliminate the auto.\n\t\t\tn, ok := v.Aux.(GCNode)\n\t\t\tif !ok || n.StorageClass() != ClassAuto {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif elim[v] == nil {\n\t\t\t\telim[v] = n\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t\treturn\n\t\tcase OpVarLive:\n\t\t\t// Don't delete the auto if it needs to be kept alive.\n\n\t\t\t// We depend on this check to keep the autotmp stack slots\n\t\t\t// for open-coded defers from being removed (since they\n\t\t\t// may not be used by the inline code, but will be used by\n\t\t\t// panic processing).\n\t\t\tn, ok := v.Aux.(GCNode)\n\t\t\tif !ok || n.StorageClass() != ClassAuto {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !used[n] {\n\t\t\t\tused[n] = true\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t\treturn\n\t\tcase OpStore, OpMove, OpZero:\n\t\t\t// v should be eliminated if we eliminate the auto.\n\t\t\tn, ok := addr[args[0]]\n\t\t\tif ok && elim[v] == nil {\n\t\t\t\telim[v] = n\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t\t// Other args might hold pointers to autos.\n\t\t\targs = args[1:]\n\t\t}\n\n\t\t// The code below assumes that we have handled all the ops\n\t\t// with sym effects already. Sanity check that here.\n\t\t// Ignore Args since they can't be autos.\n\t\tif v.Op.SymEffect() != SymNone && v.Op != OpArg {\n\t\t\tpanic(\"unhandled op with sym effect\")\n\t\t}\n\n\t\tif v.Uses == 0 && v.Op != OpNilCheck || len(args) == 0 {\n\t\t\t// Nil check has no use, but we need to keep it.\n\t\t\treturn\n\t\t}\n\n\t\t// If the address of the auto reaches a memory or control\n\t\t// operation not covered above then we probably need to keep it.\n\t\t// We also need to keep autos if they reach Phis (issue #26153).\n\t\tif v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {\n\t\t\tfor _, a := range args {\n\t\t\t\tif n, ok := addr[a]; ok {\n\t\t\t\t\tif !used[n] {\n\t\t\t\t\t\tused[n] = true\n\t\t\t\t\t\tchanged = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t// Propagate any auto addresses through v.\n\t\tnode := GCNode(nil)\n\t\tfor _, a := range args {\n\t\t\tif n, ok := addr[a]; ok && !used[n] {\n\t\t\t\tif node == nil {\n\t\t\t\t\tnode = n\n\t\t\t\t} else if node != n {\n\t\t\t\t\t// Most of the time we only see one pointer\n\t\t\t\t\t// reaching an op, but some ops can take\n\t\t\t\t\t// multiple pointers (e.g. 
NeqPtr, Phi etc.).\n\t\t\t\t\t// This is rare, so just propagate the first\n\t\t\t\t\t// value to keep things simple.\n\t\t\t\t\tused[n] = true\n\t\t\t\t\tchanged = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif node == nil {\n\t\t\treturn\n\t\t}\n\t\tif addr[v] == nil {\n\t\t\t// The address of an auto reaches this op.\n\t\t\taddr[v] = node\n\t\t\tchanged = true\n\t\t\treturn\n\t\t}\n\t\tif addr[v] != node {\n\t\t\t// This doesn't happen in practice, but catch it just in case.\n\t\t\tused[node] = true\n\t\t\tchanged = true\n\t\t}\n\t\treturn\n\t}\n\n\titerations := 0\n\tfor {\n\t\tif iterations == 4 {\n\t\t\t// give up\n\t\t\treturn\n\t\t}\n\t\titerations++\n\t\tchanged := false\n\t\tfor _, b := range f.Blocks {\n\t\t\tfor _, v := range b.Values {\n\t\t\t\tchanged = visit(v) || changed\n\t\t\t}\n\t\t\t// keep the auto if its address reaches a control value\n\t\t\tfor _, c := range b.ControlValues() {\n\t\t\t\tif n, ok := addr[c]; ok && !used[n] {\n\t\t\t\t\tused[n] = true\n\t\t\t\t\tchanged = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !changed {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Eliminate stores to unread autos.\n\tfor v, n := range elim {\n\t\tif used[n] {\n\t\t\tcontinue\n\t\t}\n\t\t// replace with OpCopy\n\t\tv.SetArgs1(v.MemoryArg())\n\t\tv.Aux = nil\n\t\tv.AuxInt = 0\n\t\tv.Op = OpCopy\n\t}\n}", "func removeArraiInfo(scope rel.Scope) rel.Scope {\n\troot, _ := scope.Get(\"//\")\n\trootTuple, _ := root.(rel.Tuple)\n\n\tarrai, _ := rootTuple.Get(\"arrai\")\n\tarraiTuple, _ := arrai.(rel.Tuple)\n\n\treturn scope.With(\"//\", rootTuple.With(\"arrai\", arraiTuple.Without(\"info\")))\n}", "func (l *Libvirt) InterfaceUndefine(Iface Interface) (err error) {\n\tvar buf []byte\n\n\targs := InterfaceUndefineArgs {\n\t\tIface: Iface,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(132, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func stripPCLinesAndNames(am *goobj2.ArchiveMember) {\n\tlists := [][]*goobj2.Sym{am.SymDefs, am.NonPkgSymDefs, am.NonPkgSymRefs}\n\tfor _, list := range lists {\n\t\tfor _, s := range list {\n\t\t\t// remove filename symbols when -tiny is passed as they\n\t\t\t// are only used for printing panics, and -tiny removes\n\t\t\t// panic printing; we need to set the symbol names to\n\t\t\t// 'gofile..', otherwise the linker will expect to see\n\t\t\t// filename symbols and panic\n\t\t\tif strings.HasPrefix(s.Name, \"gofile..\") {\n\t\t\t\ts.Name = \"gofile..\"\n\t\t\t}\n\n\t\t\tif s.Func == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, inl := range s.Func.InlTree {\n\t\t\t\tinl.Line = 1\n\t\t\t}\n\n\t\t\ts.Func.PCFile = nil\n\t\t\ts.Func.PCLine = nil\n\t\t\ts.Func.PCInline = nil\n\n\t\t\t// remove unneeded debug aux symbols\n\t\t\ts.Func.DwarfInfo = nil\n\t\t\ts.Func.DwarfLoc = nil\n\t\t\ts.Func.DwarfRanges = nil\n\t\t\ts.Func.DwarfDebugLines = nil\n\n\t\t}\n\t}\n\n\t// remove dwarf file list, it isn't needed as we pass \"-w, -s\" to the linker\n\tam.DWARFFileList = nil\n}", "func (f *Forest) cleanup(overshoot uint64) {\n\tfor p := f.numLeaves; p < f.numLeaves+overshoot; p++ {\n\t\tdelete(f.positionMap, f.data.read(p).Mini()) // clear position map\n\t\t// TODO ^^^^ that probably does nothing\n\t\tf.data.write(p, empty) // clear forest\n\t}\n}", "func Uninject(opts *options.Options, optionsFunc ...cliutils.OptionsFunc) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"uninject\",\n\t\tShort: \"Remove SDS & istio-proxy sidecars from gateway-proxy pod\",\n\t\tLong: \"Removes the 
istio-proxy sidecar from the gateway-proxy pod. \" +\n\t\t\t\"Also removes the sds sidecar from the gateway-proxy pod. \" +\n\t\t\t\"Also removes the gateway_proxy_sds cluster from the gateway-proxy envoy bootstrap ConfigMap.\",\n\t\tPreRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn nil\n\t\t},\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\terr := istioUninject(args, opts)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tcliutils.ApplyOptions(cmd, optionsFunc)\n\treturn cmd\n}", "func removeUntouchedImpl(a, b *IdiomHistory) {\n\t// two maps ImplID -> version\n\tmapa := make(map[int]int, len(a.Implementations))\n\tfor _, impl := range a.Implementations {\n\t\tmapa[impl.Id] = impl.Version\n\t}\n\tmapb := make(map[int]int, len(b.Implementations))\n\tfor _, impl := range b.Implementations {\n\t\tmapb[impl.Id] = impl.Version\n\t}\n\t// if same version, remove from both sides\n\ttouchedA := make([]Impl, 0, len(a.Implementations))\n\tfor _, impl := range a.Implementations {\n\t\t// Keep if only in a, or in b with different version\n\t\tif mapa[impl.Id] != mapb[impl.Id] {\n\t\t\ttouchedA = append(touchedA, impl)\n\t\t}\n\t}\n\ta.Implementations = touchedA\n\ttouchedB := make([]Impl, 0, len(b.Implementations))\n\tfor _, impl := range b.Implementations {\n\t\t// Keep if only in b, or in a with different version\n\t\tif mapa[impl.Id] != mapb[impl.Id] {\n\t\t\ttouchedB = append(touchedB, impl)\n\t\t}\n\t}\n\tb.Implementations = touchedB\n\n\t// also, the two sides should have same impl order (except from creation/deletion)\n}", "func (builder *Builder) CleanUp() {\n}", "func FixDirt() {\n\tfixDirt()\n}", "func (rc *ReferencedComponents) Clean(used UsedReferences) {\n\tvar cleaned []ReferencedComponent\n\tfor _, v := range rc.Refs {\n\t\tfor k := range used.Refs {\n\t\t\tif v.Component.Id == k {\n\t\t\t\tcleaned = append(cleaned, v)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\trc.Refs = cleaned\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IsFanged takes an IOC and reports whether it is fanged. Non-fangable types (bitcoin, hashes, file, cve) are always determined not to be fanged.
func (ioc *IOC) IsFanged() bool {
	if ioc.Type == Bitcoin ||
		ioc.Type == MD5 ||
		ioc.Type == SHA1 ||
		ioc.Type == SHA256 ||
		ioc.Type == SHA512 ||
		ioc.Type == File ||
		ioc.Type == CVE {
		return false
	}

	// Basically just check if the fanged version is different from the input.
	// This does label a partially fanged IOC as NOT fanged, i.e.
	// https://exampe[.]test.com/url is labeled as NOT fanged.
	if ioc.Fang().IOC == ioc.IOC {
		// They are equal, it's fanged
		return true
	}

	return false
}
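A short sketch of the two branches. The URL constant is an assumption; MD5 appears in the snippet above.

// Hash types short-circuit to false; other types compare against Fang() output.
func exampleIsFanged() {
	hash := &IOC{IOC: "d41d8cd98f00b204e9800998ecf8427e", Type: MD5}
	fmt.Println(hash.IsFanged()) // false: hashes are never considered fanged

	u := &IOC{IOC: "https://example.com/", Type: URL} // URL constant is assumed
	fmt.Println(u.IsFanged()) // true: Fang() leaves an already-fanged value unchanged
}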
[ "func (b Bet) IsForced() bool {\n\tswitch b.Type {\n\tcase bet.Ante, bet.BringIn, bet.SmallBlind, bet.BigBlind, bet.GuestBlind, bet.Straddle:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func (me TSpeakingTypeType) IsDebate() bool { return me.String() == \"debate\" }", "func (me TxsdWorkType) IsFc() bool { return me.String() == \"FC\" }", "func (me TxsdWorkType) IsFo() bool { return me.String() == \"FO\" }", "func (me TEventTypeType) IsConvention() bool { return me.String() == \"convention\" }", "func IsDuck(s interface{}) bool {\n\t_, ok := s.(Duck)\n\treturn ok\n}", "func (me TxsdInvoiceType) IsFs() bool { return me.String() == \"FS\" }", "func (me TpositionTypes) IsContractToHire() bool { return me.String() == \"contractToHire\" }", "func (f *dummyFraudProcessor) isFraud(t *domain.Transaction, tRepo domain.TransactionHistoryRepository) (bool, error) {\n\t// Perform a dummy random fraud Check, real app would use\n\t// transaction and transactionHistoryRepository to figure this out\n\ts1 := rand.NewSource(time.Now().UnixNano())\n\tr1 := rand.New(s1)\n\t// sleep anything between 1-2 secs randomly\n\ttime.Sleep(time.Second * time.Duration(r1.Intn(3)+1))\n\n\t// choose a random number and return Flase when the number is divisible by 3\n\t// This is to return True 2/3rd of the time and return False 1/4th of the time\n\t// but still keeping the random behaviour\n\t// s2 := rand.NewSource(time.Now().UnixNano())\n\t// r2 := rand.New(s2)\n\t// num := r2.Intn(999999)\n\t// if ((num + 1) % 5) == 0 {\n\t// \treturn true, nil\n\t// }\n\tf.count = f.count + 1\n\tif f.count == 10 {\n\t\tf.count = 0\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}", "func (me TrefreshModeEnumType) IsOnChange() bool { return me == \"onChange\" }", "func (me TxsdInvoiceType) IsFt() bool { return me.String() == \"FT\" }", "func (d Deposit) IsUsual() bool {\n\tfor _, unusualType := range []string{\n\t\t\"insurance\",\n\t\t\"investment\",\n\t\t\"pension\",\n\t} {\n\t\tif d.SelectionParams.Feature == unusualType {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (dbf *DBF) BOF() bool {\n\treturn dbf.recpointer == 0\n}", "func (me TxsdDunsNumberDataTypeSimpleContentExtensionDunsNumberType) IsDomesticUltimate() bool {\n\treturn me.String() == \"domestic ultimate\"\n}", "func (_this *InputEvent) IsComposing() bool {\n\tvar ret bool\n\tvalue := _this.Value_JS.Get(\"isComposing\")\n\tret = (value).Bool()\n\treturn ret\n}", "func (me TdegreeTypes) IsBachelors() bool { return me.String() == \"bachelors\" }", "func (me TpositionTypes) IsContract() bool { return me.String() == \"contract\" }", "func (me TAssociationTypeType) IsNonProfit() bool { return me.String() == \"Non-Profit\" }", "func (me TxsdInvoiceStatus) IsF() bool { return me.String() == \"F\" }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get finds projects by tags, all projects, or the project in the current directory.
func (i *Index) Get(tags []string, all bool) ([]string, error) {
	switch {
	case all:
		err := i.clean()
		return i.projects(), err
	case len(tags) > 0:
		if err := i.clean(); err != nil {
			return []string{}, err
		}

		projectsWithTags := []string{}
		for _, p := range i.projects() {
			found, err := i.hasTags(p, tags)
			if err != nil {
				return []string{}, nil
			}

			if found {
				projectsWithTags = append(projectsWithTags, p)
			}
		}

		sort.Strings(projectsWithTags)
		return projectsWithTags, nil
	default:
		curProjPath, _, err := Paths()
		if err != nil {
			return []string{}, err
		}

		if _, ok := i.Projects[curProjPath]; !ok {
			i.add(curProjPath)
			if err := i.save(); err != nil {
				return []string{}, err
			}
		}

		return []string{curProjPath}, nil
	}
}
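The three switch branches map to three call shapes. A hypothetical caller follows; how the Index value is constructed and persisted is assumed to happen elsewhere in the package.

// Sketch only: idx construction is out of scope for this snippet.
func exampleGet(idx *Index) error {
	all, err := idx.Get(nil, true) // every indexed project, after a clean pass
	if err != nil {
		return err
	}
	tagged, err := idx.Get([]string{"go", "cli"}, false) // projects carrying the given tags
	if err != nil {
		return err
	}
	current, err := idx.Get(nil, false) // the current directory's project, indexed on demand
	if err != nil {
		return err
	}
	fmt.Println(len(all), tagged, current)
	return nil
}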
[ "func projects(ctx context.Context) ([]string, error) {\n\tLogf(ctx, \"finding your projects...\")\n\treturn gcloud(ctx, \"projects\", \"list\", \"--format\", \"value(projectId)\")\n}", "func (s *SearchService) Projects(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) {\n\tvar ps []*Project\n\tresp, err := s.search(\"projects\", query, &ps, opt, options...)\n\treturn ps, resp, err\n}", "func (p *defaultProject) FindReleatedProjects(startPath string) []string {\n\tbasePath, _ := filepath.Split(startPath)\n\tlist := []string{}\n\n\tdirs := strings.Split(startPath, string(os.PathSeparator))\n\tif len(dirs) > 0 {\n\t\tdirs[0] = filepath.VolumeName(startPath) + string(os.PathSeparator)\n\t}\n\tfor i := 0; i <= len(dirs); i++ {\n\t\tbasePath = filepath.Join(dirs[0:i]...)\n\t\tif len(basePath) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif dirs[i] == constants.ProjectRootFolderName {\n\t\t\tfp := filepath.Join(basePath, constants.ProjectRootFolderName, \"project.yaml\")\n\t\t\tlist = append(list, fp)\n\t\t\tbreak\n\t\t}\n\n\t\tfiles, err := os.ReadDir(basePath)\n\t\t// files, err := ioutil.ReadDir(basePath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tbreak\n\t\t}\n\t\tfor _, f := range files {\n\n\t\t\tif f.IsDir() && f.Name() == constants.ProjectRootFolderName {\n\t\t\t\tfp := filepath.Join(basePath, f.Name(), \"project.yaml\")\n\t\t\t\tlist = append(list, fp)\n\t\t\t}\n\n\t\t}\n\n\t}\n\treturn list\n\n}", "func GetProjects(w http.ResponseWriter, r *http.Request, auth string) []Project {\n\tvar projects []Project\n\tprojectFileName := auth + globals.PROJIDFILE\n\t//First see if project already exist\n\tstatus, filepro := caching.ShouldFileCache(projectFileName, globals.PROJIDDIR)\n\tdefer filepro.Close()\n\tif status == globals.Error || status == globals.DirFail {\n\t\thttp.Error(w, \"Failed to create a file\", http.StatusInternalServerError)\n\t\treturn nil\n\t}\n\tif status == globals.Exist {\n\t\t//The file exist\n\t\t//We read from file\n\t\terr := caching.ReadFile(filepro, &projects)\n\t\tif err != nil {\n\t\t\terrmsg := \"The Failed Reading from file with error\" + err.Error()\n\t\t\thttp.Error(w, errmsg, http.StatusInternalServerError)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t//Else we need to query to get it\n\t\tfor i := 0; i < globals.MAXPAGE; i++ {\n\t\t\tvar subProj []Project\n\t\t\tquery := globals.GITAPI + globals.PROJQ + globals.PAGEQ + strconv.Itoa(i+1)\n\t\t\terr := apiGetCall(w, query, auth, &subProj)\n\t\t\tif err != nil {\n\t\t\t\t//The API call has failed\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//When it's empty we no longer need to do calls\n\t\t\tif len(subProj) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tprojects = append(projects, subProj...)\n\t\t}\n\t\tcaching.CacheStruct(filepro, projects)\n\n\t}\n\treturn projects\n}", "func GetProjects(runner Runner) ([]string, error) {\n\tcmd := exec.Command(\"oc\", \"get\", \"projects\", \"-o=jsonpath={.items[*].metadata.name}\")\n\tvar b bytes.Buffer\n\tcmd.Stdout = &b\n\tif err := runner.Run(cmd, filepath.Join(\"project-names\")); err != nil {\n\t\treturn nil, err\n\t}\n\treturn readSpaceSeparated(&b)\n}", "func findProject(w http.ResponseWriter, req *http.Request) (*gotcha.Project, error) {\n name := req.URL.Query().Get(\":projectName\")\n p, err := gotcha.LoadProject(name)\n if err != nil {\n return nil, errors.New(fmt.Sprintf(\"Project with name '%v' not found\", name))\n }\n return p, nil\n}", "func Projects() []types.ProjectConfig {\n\tvar projects []types.ProjectConfig\n\tvar 
projectsWithPath []types.ProjectConfig\n\trootPath := ProjectRoot()\n\tprojectsDir := viper.GetString(\"projectDirectory\")\n\n\tif err := viper.UnmarshalKey(\"projects\", &projects); err != nil {\n\t\tlog.Print(err)\n\t\tlog.Fatal(\"Error: could not parse projects from config file\")\n\t}\n\n\t// extrapolate the full repo path\n\tfor _, project := range projects {\n\t\tvar dir string\n\n\t\t// default to project name if path fragement is not configured\n\t\tif project.Path != nil {\n\t\t\tdir = *project.Path\n\t\t} else {\n\t\t\tdir = project.Name\n\t\t}\n\n\t\tfullPath := path.Join(rootPath, projectsDir, dir)\n\t\tproject.FullPath = &fullPath\n\t\tprojectsWithPath = append(projectsWithPath, project)\n\t}\n\n\treturn projectsWithPath\n}", "func GetProjects(w http.ResponseWriter, r *http.Request) {\n\t// Get IDs for projects\n\t// Grab those projects.\n\t// Return those cool projects and response\n}", "func (p *ProjectService) Get(ctx context.Context, proj string, username string) (*Project, error) {\n\tprojects, err := p.List(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not retrieve projects, error: %w\", err)\n\t}\n\n\tfor _, project := range projects {\n\t\tif project.Name == proj && project.Username == username {\n\t\t\treturn project, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"Could not find project %s for user %s. Check you're following the project\", proj, username)\n}", "func (uh *UserHandler) GetProjectsWMatchTags(w http.ResponseWriter, r *http.Request) {\n\n\tuser := uh.Authentication(r)\n\tif user == nil {\n\t\thttp.Redirect(w, r, \"/Login\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\tuid := user.UID\n\n\tprojects := uh.UService.SearchProjectWMatchTag(uid)\n\n\tjson, err := json.Marshal(projects)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw.Write(json)\n}", "func (p *Provider) GetProjects() []string {\n\treturn p.opts.projects\n}", "func GetProjects() []m.Project {\n body, err := authenticatedGet(\"projects\")\n if err != nil {\n panic(err.Error())\n } else {\n var responseData projectResponse\n err = json.Unmarshal(body, &responseData)\n if err != nil {\n panic(err.Error())\n }\n\n return responseData.Data\n }\n}", "func (a *DefaultApiService) Projects(ctx context.Context) ([]Project, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload []Project\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/projects\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json; charset=utf-8\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey 
= auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarQueryParams.Add(\"circle-token\", key)\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\tdefer localVarHttpResponse.Body.Close()\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tbodyBytes, _ := ioutil.ReadAll(localVarHttpResponse.Body)\n\t\treturn successPayload, localVarHttpResponse, reportError(\"Status: %v, Body: %s\", localVarHttpResponse.Status, bodyBytes)\n\t}\n\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t\treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func SearchProjects(userID int) ([]models.Project, error) {\n\to := GetOrmer()\n\tsql := `select distinct p.project_id, p.name, p.public \n\t\tfrom project p \n\t\tleft join project_member pm on p.project_id = pm.project_id \n\t\twhere (pm.user_id = ? or p.public = 1) and p.deleted = 0`\n\n\tvar projects []models.Project\n\n\tif _, err := o.Raw(sql, userID).QueryRows(&projects); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn projects, nil\n}", "func (s projectService) Get(projectsQuery ProjectsQuery) (*Projects, error) {\n\tv, _ := query.Values(projectsQuery)\n\tpath := s.BasePath\n\tencodedQueryString := v.Encode()\n\tif len(encodedQueryString) > 0 {\n\t\tpath += \"?\" + encodedQueryString\n\t}\n\n\tresp, err := apiGet(s.getClient(), new(Projects), path)\n\tif err != nil {\n\t\treturn &Projects{}, err\n\t}\n\n\treturn resp.(*Projects), nil\n}", "func (client *Client) Projects() ([]*Project, *APIResponse) {\n\tprojects := []*Project{}\n\tapiResp := client.request(http.MethodGet, \"projects\", nil, nil, &projects)\n\treturn projects, apiResp\n}", "func getRequestedProjects(names []string, all bool) ([]*ddevapp.DdevApp, error) {\n\trequestedProjects := make([]*ddevapp.DdevApp, 0)\n\n\t// If no project is specified, return the current project\n\tif len(names) == 0 && !all {\n\t\tproject, err := ddevapp.GetActiveApp(\"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn append(requestedProjects, project), nil\n\t}\n\n\tallDockerProjects := ddevapp.GetDockerProjects()\n\n\t// If all projects are requested, return here\n\tif all {\n\t\treturn allDockerProjects, nil\n\t}\n\n\t// Convert all projects slice into map indexed by project name to prevent duplication\n\tallDockerProjectMap := map[string]*ddevapp.DdevApp{}\n\tfor _, project := range allDockerProjects {\n\t\tallDockerProjectMap[project.Name] = project\n\t}\n\n\t// Select requested projects\n\trequestedProjectsMap := map[string]*ddevapp.DdevApp{}\n\tfor _, name := range names {\n\t\tvar exists bool\n\t\t// If the requested project name is found in the docker map, OK\n\t\t// If not, if we find it in the globl project list, OK\n\t\t// Otherwise, error.\n\t\tif requestedProjectsMap[name], exists = allDockerProjectMap[name]; !exists {\n\t\t\tif _, exists = globalconfig.DdevGlobalConfig.ProjectList[name]; exists {\n\t\t\t\trequestedProjectsMap[name] = &ddevapp.DdevApp{Name: name}\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"could not find requested project %s\", 
name)\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// Convert map back to slice\n\tfor _, project := range requestedProjectsMap {\n\t\trequestedProjects = append(requestedProjects, project)\n\t}\n\n\treturn requestedProjects, nil\n}", "func (mong *MongoStore) QueryProjects(uuid string, name string) ([]QProject, error) {\n\n\tquery := bson.M{}\n\n\tif name != \"\" {\n\n\t\tquery = bson.M{\"name\": name}\n\n\t} else if uuid != \"\" {\n\t\tquery = bson.M{\"uuid\": uuid}\n\t}\n\n\tdb := mong.Session.DB(mong.Database)\n\tc := db.C(\"projects\")\n\tvar results []QProject\n\terr := c.Find(query).All(&results)\n\tif err != nil {\n\t\tlog.Fatal(\"STORE\", \"\\t\", err.Error())\n\t}\n\n\tif len(results) > 0 {\n\t\treturn results, nil\n\t}\n\n\treturn results, errors.New(\"not found\")\n}", "func (m *manager) List(query ...*models.ProjectQueryParam) ([]*models.Project, error) {\n\tvar q *models.ProjectQueryParam\n\tif len(query) > 0 {\n\t\tq = query[0]\n\t}\n\treturn dao.GetProjects(q)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
loginAttempt increments the number of login attempts stored in the session's values.
func loginAttempt(sess *sessions.Session) {
	// Log the attempt
	if sess.Values[sessLoginAttempt] == nil {
		sess.Values[sessLoginAttempt] = 1
	} else {
		sess.Values[sessLoginAttempt] = sess.Values[sessLoginAttempt].(int) + 1
	}
}
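A hedged sketch of how a handler might use it. The store variable, the cookie name, and the lockout threshold are all assumptions, not part of the snippet above; only sessLoginAttempt and loginAttempt come from it.

// Hypothetical handler using gorilla/sessions-style APIs.
func handleLogin(w http.ResponseWriter, r *http.Request) {
	sess, _ := store.Get(r, "session") // store: assumed *sessions.CookieStore
	loginAttempt(sess)
	if n, ok := sess.Values[sessLoginAttempt].(int); ok && n > 5 {
		http.Error(w, "too many login attempts", http.StatusTooManyRequests)
		return
	}
	_ = sess.Save(r, w)
}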
[ "func AuthenticateLoginAttempt(r *http.Request) *sessions.Session {\n\tvar userid string\n\tlog.Println(\"Authenticating Login credentials.\")\n\tattemptEmail := template.HTMLEscapeString(r.Form.Get(\"email\")) //Escape special characters for security.\n\tattemptPassword := template.HTMLEscapeString(r.Form.Get(\"password\")) //Escape special characters for security.\n\tlog.Println(\"Attempt email :\", attemptEmail, \"Attempt Password:\", attemptPassword)\n\trow := databases.GlobalDBM[\"mydb\"].Con.QueryRow(\"SELECT userid FROM user WHERE email = '\" + attemptEmail + \"' AND password = '\" + attemptPassword + \"'\")\n\terr := row.Scan(&userid)\n\tif err != nil { // User does not exist.\n\t\tlog.Println(\"User authentication failed.\")\n\t\treturn &sessions.Session{Status: sessions.DELETED}\n\t}\n\t//User exists.\n\tlog.Println(\"User authentication successful. Creating new Session.\")\n\treturn sessions.GlobalSM[\"usersm\"].SetSession(userid, time.Hour*24*3) // Session lives in DB for 3 days.\n}", "func (l LoginForm) LoginAttempt() {\n\tdb.Create(l)\n}", "func (mgr *SessionManager) updateFailureCount(username string, failed bool) {\n\n\tfailures := mgr.GetLoginFailures()\n\n\t// Expire old entries in the cache if we have a failure window defined.\n\tif window := getLoginFailureWindow(); window > 0 {\n\t\tcount := expireOldFailedAttempts(window, &failures)\n\t\tif count > 0 {\n\t\t\tlog.Infof(\"Expired %d entries from session cache due to max age reached\", count)\n\t\t}\n\t}\n\n\t// If we exceed a certain cache size, we need to remove random entries to\n\t// prevent overbloating the cache with fake entries, as this could lead to\n\t// memory exhaustion and ultimately in a DoS. We remove a single entry to\n\t// replace it with the new one.\n\t//\n\t// Chances are that we remove the one that is under active attack, but this\n\t// chance is low (1:cache_size)\n\tif failed && len(failures) >= getMaximumCacheSize() {\n\t\tlog.Warnf(\"Session cache size exceeds %d entries, removing random entry\", getMaximumCacheSize())\n\t\tidx := rand.Intn(len(failures) - 1)\n\t\tvar rmUser string\n\t\ti := 0\n\t\tfor key := range failures {\n\t\t\tif i == idx {\n\t\t\t\trmUser = key\n\t\t\t\tdelete(failures, key)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tlog.Infof(\"Deleted entry for user %s from cache\", rmUser)\n\t}\n\n\tattempt, ok := failures[username]\n\tif !ok {\n\t\tattempt = LoginAttempts{FailCount: 0}\n\t}\n\n\t// On login failure, increase fail count and update last failed timestamp.\n\t// On login success, remove the entry from the cache.\n\tif failed {\n\t\tattempt.FailCount += 1\n\t\tattempt.LastFailed = time.Now()\n\t\tfailures[username] = attempt\n\t\tlog.Warnf(\"User %s failed login %d time(s)\", username, attempt.FailCount)\n\t} else {\n\t\tif attempt.FailCount > 0 {\n\t\t\t// Forget username for cache size enforcement, since entry in cache was deleted\n\t\t\tdelete(failures, username)\n\t\t}\n\t}\n\n\terr := mgr.storage.SetLoginAttempts(failures)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not update login attempts: %v\", err)\n\t}\n\n}", "func (dc *DatadogCollector) IncrementAttempts() {\n\t_ = dc.client.Count(dmAttempts, 1, dc.tags, 1.0)\n}", "func (u *Updater) Login(playerGUID interfaces.GUID, session *Session) error {\n\tu.sessionsLock.Lock()\n\tdefer u.sessionsLock.Unlock()\n\n\tif currPlayerGUID, ok := u.sessions[playerGUID]; ok {\n\t\tu.log.WithFields(logrus.Fields{\n\t\t\t\"current_player\": currPlayerGUID,\n\t\t\t\"new_player\": playerGUID,\n\t\t}).Warningf(\"Tried to log in 
with multiple characters\")\n\t\treturn fmt.Errorf(\"player with GUID %v is already logged in\", playerGUID)\n\t}\n\n\tu.sessions[playerGUID] = &loginData{\n\t\tSession: session,\n\t\tUpdateCache: make(map[interfaces.GUID]*updateCache),\n\t}\n\n\tu.log.Tracef(\"Registered GUID=%v\", playerGUID)\n\n\t// Mark the player as logged in.\n\tu.om.GetPlayer(playerGUID).IsLoggedIn = true\n\tchannels.ObjectUpdates <- playerGUID\n\n\treturn nil\n}", "func (uService *UserService) saveLoginAttempt(successful bool, user *model.UserModel) {\n\n\tif successful {\n\t\tuser.UnsuccessfulLoginAttempts = 0\n\t\tuser.LastLoginDate = time.Now()\n\t} else {\n\t\tappConfig := config.GetInstance()\n\t\tuser.UnsuccessfulLoginAttempts++\n\t\tif user.UnsuccessfulLoginAttempts >= appConfig.MaxUnsuccessfulLoginAttemptsAllowed {\n\t\t\tuser.AccountLocked = true\n\t\t}\n\t}\n\tuserDAO := dao.GetUserDAOInstance()\n\terr := userDAO.SaveUser(user)\n\tif err != nil {\n\t\tlog.Warning(err.Error())\n\t}\n}", "func (_m *MetricsInterface) IncrementLogin() {\n\t_m.Called()\n}", "func loginHandler(a *appContext, w http.ResponseWriter, r *http.Request) (int, error) {\n\ta.authsession = a.authprovider.StartSession(newState())\n\thttp.Redirect(w, r, a.authsession.AuthURL, http.StatusFound)\n\treturn http.StatusFound, nil\n}", "func (atics *Analytics) UserLoginAttempt(successful bool, username string, message string) {\n\n\tproducer, err := atics.newKafkaProducer()\n\n\tif err != nil {\n\t\tlog.Warning(\"Error creating kafka producer.\")\n\t\treturn\n\t}\n\n\tdefer producer.Close()\n\n\t// Delivery report handler for produced messages\n\tgo func() {\n\t\tfor events := range producer.Events() {\n\t\t\tswitch ev := events.(type) {\n\t\t\tcase *kafka.Message:\n\t\t\t\tif ev.TopicPartition.Error != nil {\n\t\t\t\t\tlog.Warning(\"Delivery failed: \", ev.TopicPartition)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Warning(\"Delivered message to topic: \", ev.TopicPartition)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tmMessage, mErr := atics.createAnalyticsMessage(successful, username, message)\n\tif mErr != nil {\n\t\treturn\n\t}\n\n\tproducer.Produce(&kafka.Message{\n\t\tTopicPartition: kafka.TopicPartition{Topic: &atics.loginKafkaTopic, Partition: kafka.PartitionAny},\n\t\tValue: mMessage,\n\t}, nil)\n\n\t// Wait for message deliveries before shutting down\n\tproducer.Flush(15 * 1000)\n\n}", "func (mgr *SessionManager) getFailureCount(username string) LoginAttempts {\n\tfailures := mgr.GetLoginFailures()\n\tattempt, ok := failures[username]\n\tif !ok {\n\t\tattempt = LoginAttempts{FailCount: 0}\n\t}\n\treturn attempt\n}", "func (u *MockUserRecord) NumLoginDays() int { return 0 }", "func (m *TeamsAsyncOperation) SetAttemptsCount(value *int32)() {\n err := m.GetBackingStore().Set(\"attemptsCount\", value)\n if err != nil {\n panic(err)\n }\n}", "func (*AgendaAtomicService) LoginAndGetSessionID(\n\tusername string, password string) (string, int, LoginResponse) {\n\t// ---- check username and password ----\n\tif username == \"\" || password == \"\" { // check if empty username and password\n\t\treturn \"\", http.StatusBadRequest, LoginResponse{EmptyUsernameOrPassword, -1}\n\t}\n\tpassword = tools.MD5Encryption(password)\n\tdao := agendaDao{xormEngine}\n\tuser := User{UserName: username, Password: password}\n\thas, err := dao.ifUserExistByConditions(&user) // check if exist\n\tif err != nil { // server error\n\t\treturn \"\", http.StatusInternalServerError, LoginResponse{ServerError, -1}\n\t}\n\tif !has { // user not exist\n\t\treturn \"\", 
http.StatusUnauthorized, LoginResponse{IncorrectUsernameAndPassword, -1}\n\t}\n\t// ---- get new sessionID ----\n\tvar sessionID = tools.GenenrateSessionID() // generate new sessionID\n\tfor { // make sure sessionID unique\n\t\tif has, err = dao.ifUserExistByConditions(&User{SessionID: sessionID}); err == nil && !has {\n\t\t\tbreak\n\t\t}\n\t\tsessionID = tools.GenenrateSessionID()\n\t}\n\taffected, _ := dao.updateUser(&User{SessionID: sessionID}, &User{UserName: username, Password: password})\n\tif affected == 0 { // user not exist\n\t\treturn \"\", http.StatusUnauthorized, LoginResponse{IncorrectUsernameAndPassword, -1}\n\t}\n\treturn sessionID, http.StatusOK, LoginResponse{LoginSucceed, user.ID}\n}", "func (d *Dao) SecurityLoginCount(c context.Context, index int64, reason string, stime, etime time.Time) (res int64, err error) {\n\trow := d.db.QueryRow(c, fmt.Sprintf(_securityLoginCountSQL, hitHistory(index)), reason, stime, etime)\n\tif err = row.Scan(&res); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\terr = nil\n\t\t} else {\n\t\t\tlog.Error(\"row.Scan error(%v)\", err)\n\t\t}\n\t}\n\treturn\n}", "func (_m *MetricsInterface) IncrementLoginFail() {\n\t_m.Called()\n}", "func (querier *ModelQuerier) AttemptLogin(username, password string) (*User, error) {\n\tuser := querier.FindUser(username)\n\tif user == nil {\n\t\treturn nil, errors.New(\"Couldn't find user with that username\")\n\t}\n\terr :=\n\t\tbcrypt.CompareHashAndPassword([]byte(user.PasswordHash), []byte(password))\n\tif err != nil {\n\t\treturn nil, errors.New(\"Password was invalid\")\n\t}\n\treturn user, nil\n}", "func (m *TeamsAsyncOperation) SetAttemptsCount(value *int32)() {\n m.attemptsCount = value\n}", "func (cl *APIClient) Login(params ...string) *R.Response {\n\totp := \"\"\n\tif len(params) > 0 {\n\t\totp = params[0]\n\t}\n\tcl.SetOTP(otp)\n\trr := cl.Request(map[string]string{\"COMMAND\": \"StartSession\"})\n\tif rr.IsSuccess() {\n\t\tcol := rr.GetColumn(\"SESSION\")\n\t\tif col != nil {\n\t\t\tcl.SetSession(col.GetData()[0])\n\t\t} else {\n\t\t\tcl.SetSession(\"\")\n\t\t}\n\t}\n\treturn rr\n}", "func Test_Login_MultiLogin(t *testing.T) {\n\tgSession = nil\n\tsession1, err := login(TestValidUser)\n\tif session1 == nil || err != nil {\n\t\tt.Error(\"fail at login\")\n\t}\n\tsession2, err := login(TestValidUser)\n\tif err != nil {\n\t\tt.Error(\"fail at login\")\n\t}\n\tif session1 != session2 {\n\t\tt.Error(\"multi login should get same session\")\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
jdecrypt is a private function to "decrypt" a password.
func jdecrypt(stCuen string, stPass string) (stRes string, err error) {
	var stEnc []byte
	stEnc, err = base64.StdEncoding.DecodeString(stPass)
	if err != nil {
		log.Println("jdecrypt ", stPass, err)
	} else {
		lon := len(stEnc)
		lan := len(stCuen)
		if lon > lan {
			stCuen = PadRight(stCuen, " ", lon)
		}
		rCuen := []byte(stCuen)
		rEnc := []byte(stEnc)
		rRes := make([]byte, lon)
		for i := 0; i < lon; i++ {
			rRes[i] = rCuen[i] ^ rEnc[i]
		}
		stRes = string(rRes)
	}
	return
}
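The scheme is a plain XOR of the base64-decoded value against the account string, right-padded with spaces when the ciphertext is longer. A hypothetical round trip follows; it mirrors what PadRight is assumed to do (space-pad to the given length) and needs encoding/base64, fmt, and strings.

// Build a stored value by the inverse operation, then recover it with jdecrypt.
func exampleJdecrypt() {
	account := "key"
	plain := "s3cret-password"
	padded := account + strings.Repeat(" ", len(plain)-len(account)) // assumed PadRight behavior
	enc := make([]byte, len(plain))
	for i := range enc {
		enc[i] = padded[i] ^ plain[i]
	}
	stored := base64.StdEncoding.EncodeToString(enc)

	got, err := jdecrypt(account, stored)
	fmt.Println(got, err) // s3cret-password <nil>
}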
[ "func DecryptJasypt(encrypted []byte, password string) ([]byte, error) {\n\tif len(encrypted) < des.BlockSize {\n\t\treturn nil, fmt.Errorf(\"Invalid encrypted text. Text length than block size.\")\n\t}\n\n\tsalt := encrypted[:des.BlockSize]\n\tct := encrypted[des.BlockSize:]\n\n\tkey, err := PBKDF1MD5([]byte(password), salt, 1000, des.BlockSize*2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tiv := key[des.BlockSize:]\n\tkey = key[:des.BlockSize]\n\n\tb, err := des.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdst := make([]byte, len(ct))\n\tbm := cipher.NewCBCDecrypter(b, iv)\n\tbm.CryptBlocks(dst, ct)\n\n\t// Remove any padding\n\tpad := int(dst[len(dst)-1])\n\tdst = dst[:len(dst)-pad]\n\n\treturn dst, nil\n}", "func encryptPassword(password string) {\n\n}", "func CipherDecodeJNI(s *C.char, ts C.long) *C.char {\n\ttm := time.Unix(int64(ts), 0)\n\tdec := C.GoString(s)\n\n\tdecoder := dhcrypto.NewCipherDecode([]byte(key), tm)\n\tbytes, e := decoder.Decode(dec)\n\tif e != nil {\n\t\tfmt.Println(\"error:\", e)\n\t\treturn nil\n\t}\n\tstr := string(bytes)\n\tfmt.Println(\"decoded:\", str)\n\treturn C.CString(str)\n}", "func Initdecrypt(hsecretfile string, mkeyfile string) []byte {\n\n\thudsonsecret, err := ioutil.ReadFile(hsecretfile)\n\tif err != nil {\n\t\tfmt.Printf(\"error reading hudson.util.Secret file '%s':%s\\n\", hsecretfile, err)\n\t\tos.Exit(1)\n\t}\n\n\tmasterkey, err := ioutil.ReadFile(mkeyfile)\n\tif err != nil {\n\t\tfmt.Printf(\"error reading master.key file '%s':%s\\n\", mkeyfile, err)\n\t\tos.Exit(1)\n\t}\n\n\tk, err := Decryptmasterkey(string(masterkey), hudsonsecret)\n\tif err != nil {\n\t\tfmt.Println(\"Error decrypting keys... \", err)\n\t\tos.Exit(1)\n\t}\n\treturn k\n}", "func decryptPassword(encryptedPassword EncryptedPassword) (decryptedPassword Password, err error) {\n\treturn Password{URL: encryptedPassword.URL, Email: encryptedPassword.Email, Password: encryptedPassword.Password}, nil\n}", "func Decrypt(key []byte) []byte {\n\tdecryptedbin, err := dpapi.DecryptBytes(key)\n\terror_log.Check(err, \"Unprotect String with DPAPI\", \"decrypter\")\n\treturn decryptedbin\n}", "func (d Decryptor) Decrypt(bs []byte) ([]byte, error) {\n\tswitch d.Algorithm {\n\tcase \"\", AlgoPBEWithMD5AndDES:\n\t\tif d.Password == \"\" {\n\t\t\treturn nil, ErrEmptyPassword\n\t\t}\n\t\treturn DecryptJasypt(bs, d.Password)\n\t}\n\treturn nil, fmt.Errorf(\"unknown jasypt algorithm\")\n}", "func TestDecodePassword(t *testing.T) {\n\tt.Parallel()\n\n\tpasswordDecoded, err := junosdecode.Decode(junWordCoded)\n\tif err != nil {\n\t\tt.Errorf(\"error on decode %v\", err)\n\t}\n\tif passwordDecoded != junWordDecoded {\n\t\tt.Errorf(\"decode password failed\")\n\t}\n}", "func decryptJWE(jweString string, key []byte) (messages.Base, error) {\n\tif core.Debug {\n\t\tmessage(\"debug\", \"Entering into http2.DecryptJWE function\")\n\t\tmessage(\"debug\", fmt.Sprintf(\"Input JWE String: %s\", jweString))\n\t}\n\n\tvar m messages.Base\n\n\t// Parse JWE string back into JSONWebEncryption\n\tjwe, errObject := jose.ParseEncrypted(jweString)\n\tif errObject != nil {\n\t\treturn m, fmt.Errorf(\"there was an error parseing the JWE string into a JSONWebEncryption object:\\r\\n%s\", errObject)\n\t}\n\n\tif core.Debug {\n\t\tmessage(\"debug\", fmt.Sprintf(\"Parsed JWE:\\r\\n%+v\", jwe))\n\t}\n\n\t// Decrypt the JWE\n\tjweMessage, errDecrypt := jwe.Decrypt(key)\n\tif errDecrypt != nil {\n\t\treturn m, fmt.Errorf(\"there was an error decrypting the JWE:\\r\\n%s\", 
errDecrypt.Error())\n\t}\n\n\t// Decode the JWE payload into a messages.Base struct\n\terrDecode := gob.NewDecoder(bytes.NewReader(jweMessage)).Decode(&m)\n\tif errDecode != nil {\n\t\treturn m, fmt.Errorf(\"there was an error decoding JWE payload message sent by an agent:\\r\\n%s\", errDecode.Error())\n\t}\n\n\tif core.Debug {\n\t\tmessage(\"debug\", \"Leaving http2.DecryptJWE function without error\")\n\t\tmessage(\"debug\", fmt.Sprintf(\"Returning message base: %+v\", m))\n\t}\n\treturn m, nil\n}", "func passwordKey() []byte {\n\ta0 := []byte{0x90, 0x19, 0x14, 0xa0, 0x94, 0x23, 0xb1, 0xa4, 0x98, 0x27, 0xb5, 0xa8, 0xd3, 0x31, 0xb9, 0xe2}\n\ta1 := []byte{0x10, 0x91, 0x20, 0x15, 0xa1, 0x95, 0x24, 0xb2, 0xa5, 0x99, 0x28, 0xb6, 0xa9, 0xd4, 0x32, 0xf1}\n\ta2 := []byte{0x12, 0x11, 0x92, 0x21, 0x16, 0xa2, 0x96, 0x25, 0xb3, 0xa6, 0xd1, 0x29, 0xb7, 0xe0, 0xd5, 0x33}\n\ta3 := []byte{0x18, 0x13, 0x12, 0x93, 0x22, 0x17, 0xa3, 0x97, 0x26, 0xb4, 0xa7, 0xd2, 0x30, 0xb8, 0xe1, 0xd6}\n\n\tresult := make([]byte, 16)\n\tfor i := 0; i < len(result); i++ {\n\t\tresult[i] = (((a0[i] & a1[i]) ^ a2[i]) | a3[i])\n\t}\n\n\treturn result\n}", "func decrypt(fileName string, cipherText string) (plainText string, err error) {\n\n\t// Trim newlines / whitespaces from the plaintext\n\tcipherText = strings.TrimSpace(cipherText)\n\tcipherBytes, err := base64.StdEncoding.DecodeString(cipherText)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Open the OTP file\n\tfd, err := os.OpenFile(fileName, os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Set the OTP & ciphertext buffer to the same length as the plaintext\n\tcipherTextLength := int64(len(cipherBytes))\n\toneTimePad := make([]byte, cipherTextLength)\n\tplainTextBytes := make([]byte, cipherTextLength)\n\n\t// Move the offset to EOF - the length of the plaintext\n\toffset, err := fd.Seek(-cipherTextLength, os.SEEK_END)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// Read in the OTP\n\tfd.Read(oneTimePad)\n\n\t// Remove the used OTP from the file\n\terr = fd.Truncate(offset)\n\t// Close it\n\tdefer fd.Close()\n\n\t// XOR (instead of modular addition) the ciphertext with the OTP\n\tfor i, cipherTextByte := range cipherBytes {\n\t\tplainTextBytes[i] = byte(cipherTextByte) ^ oneTimePad[i]\n\t}\n\n\t// Return the the decrypted plaintext\n\treturn string(plainTextBytes), nil\n}", "func\tDecryptPasswordHash(argon2Hash, argon2IV, scryptHash, scryptIV []byte) ([]byte, []byte, error) {\n\t/**************************************************************************\n\t**\tGet the master key from the .env file\n\t**************************************************************************/\n\tMasterKey, err := base64.RawStdEncoding.DecodeString(os.Getenv(\"MASTER_KEY\"))\n\tif (err != nil) {\n\t\tlogs.Error(err)\n\t\treturn nil, nil, err\n\t}\n\n\t/**************************************************************************\n\t**\tCreate a Cipher with the master key\n\t**************************************************************************/\n\tblock, err := aes.NewCipher(MasterKey)\n\tif (err != nil) {\n\t\tlogs.Error(err)\n\t\treturn nil, nil, err\n\t}\n\n\t/**************************************************************************\n\t**\tDecrypt the ciphertext\n\t**************************************************************************/\n\targon2UnHash := make([]byte, len(argon2Hash))\n\targon2Dec := cipher.NewCBCDecrypter(block, argon2IV)\n\targon2Dec.CryptBlocks(argon2UnHash, argon2Hash)\n\n\tscryptUnHash := make([]byte, 
len(scryptHash))\n\tscryptDec := cipher.NewCBCDecrypter(block, scryptIV)\n\tscryptDec.CryptBlocks(scryptUnHash, scryptHash)\n\n\t/**************************************************************************\n\t**\tUnpad the result\n\t**************************************************************************/\n\targon2UnHash, _ = pkcs7Unpad(argon2UnHash, aes.BlockSize)\n\tscryptUnHash, _ = pkcs7Unpad(scryptUnHash, aes.BlockSize)\n\n\treturn argon2UnHash, scryptUnHash, nil\n}", "func deobfuscate(in string, key []byte) ([]byte, error) {\n\tif len(key) == 0 {\n\t\treturn nil, errors.New(\"key cannot be zero length\")\n\t}\n\n\tdecoded, err := base64.StdEncoding.DecodeString(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := make([]byte, len(decoded))\n\tfor i, c := range decoded {\n\t\tout[i] = c ^ key[i%len(key)]\n\t}\n\n\treturn out, nil\n}", "func getUnscrambledPassword(password string, scrambleKey string) string {\n\tdecodedString, _ := base64.StdEncoding.DecodeString(password)\n\tunscrambledPassword := xorString(string(decodedString), scrambleKey)\n\treturn unscrambledPassword\n}", "func Decrypt(password []byte, decryptionKey string, ttl int) (string, error) {\n\tk, err := fernet.DecodeKeys(decryptionKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmessage := fernet.VerifyAndDecrypt(password, time.Duration(ttl)*time.Second, k)\n\treturn string(message), err\n}", "func decryptJWEToken(token string) ([]byte, error) {\n\te, err := jose.ParseEncrypted(token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpayload, err := e.Decrypt(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn decodeJWSToken(string(payload))\n}", "func PwDecrypt(encrypted, byteSecret []byte) (string, error) {\n\n\tvar secretKey [32]byte\n\tcopy(secretKey[:], byteSecret)\n\n\tvar decryptNonce [24]byte\n\tcopy(decryptNonce[:], encrypted[:24])\n\tdecrypted, ok := secretbox.Open(nil, encrypted[24:], &decryptNonce, &secretKey)\n\tif !ok {\n\t\treturn \"\", errors.New(\"PwDecrypt(secretbox.Open)\")\n\t}\n\n\treturn string(decrypted), nil\n}", "func decryptJWE(jweString string, key []byte) (messages.Base, error) {\n\tvar m messages.Base\n\n\t// Parse JWE string back into JSONWebEncryption\n\tjwe, errObject := jose.ParseEncrypted(jweString)\n\tif errObject != nil {\n\t\treturn m, fmt.Errorf(\"there was an error parseing the JWE string into a JSONWebEncryption object:\\r\\n%s\", errObject)\n\t}\n\n\t// Decrypt the JWE\n\tjweMessage, errDecrypt := jwe.Decrypt(key)\n\tif errDecrypt != nil {\n\t\treturn m, fmt.Errorf(\"there was an error decrypting the JWE:\\r\\n%s\", errDecrypt.Error())\n\t}\n\n\t// Decode the JWE payload into a messages.Base struct\n\terrDecode := gob.NewDecoder(bytes.NewReader(jweMessage)).Decode(&m)\n\tif errDecode != nil {\n\t\treturn m, fmt.Errorf(\"there was an error decoding JWE payload message sent by an agent:\\r\\n%s\", errDecode.Error())\n\t}\n\n\treturn m, nil\n}", "func (q MockService) Decrypt(encKey string, envVal string) (result string, err error) {\n\tresult = \"Q_Qesb1Z2hA7H94iXu3_buJeQ7416\"\n\terr = nil\n\n\treturn result, err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
JLoginGET service to return a person's data
func JLoginGET(w http.ResponseWriter, r *http.Request) {
	var params httprouter.Params
	sess := model.Instance(r)
	v := view.New(r)
	v.Vars["token"] = csrfbanana.Token(w, r, sess)
	params = context.Get(r, "params").(httprouter.Params)
	cuenta := params.ByName("cuenta")
	password := params.ByName("password")
	stEnc, _ := base64.StdEncoding.DecodeString(password)
	password = string(stEnc)
	var jpers model.Jperson
	jpers.Cuenta = cuenta
	pass, err := (&jpers).JPersByCuenta()
	if err == model.ErrNoResult {
		loginAttempt(sess)
	} else {
		b := passhash.MatchString(pass, password)
		if b && jpers.Nivel > 0 {
			var js []byte
			js, err = json.Marshal(jpers)
			if err == nil {
				model.Empty(sess)
				sess.Values["id"] = jpers.Id
				sess.Save(r, w)
				w.Header().Set("Content-Type", "application/json")
				w.Write(js)
				return
			}
		}
	}
	if err == nil {
		// A nil error here means the password or access-level check failed;
		// set one so err.Error() below cannot dereference nil.
		err = errors.New("invalid credentials")
	}
	log.Println(err)
	// http.Error(w, err.Error(), http.StatusBadRequest)
	http.Error(w, err.Error(), http.StatusInternalServerError)
	return
}
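A hedged caller-side sketch: the handler implies only that the password URL segment is base64 of the plaintext; the route path and host below are hypothetical.

func loginExample() error {
	// Encode the password segment first, since JLoginGET base64-decodes it.
	pw := base64.StdEncoding.EncodeToString([]byte("s3cret"))
	// Hypothetical route shape and host; adjust to the real router setup.
	resp, err := http.Get("http://localhost:8080/jlogin/mycuenta/" + pw)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// On success the body holds a JSON-encoded model.Jperson.
	return nil
}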
[ "func GetData(accessToken string, w http.ResponseWriter, r *http.Request) {\n\trequest, err := http.NewRequest(\"GET\", \"https://auth.vatsim.net/api/user\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trequest.Header.Add(\"Bearer\", accessToken)\n\trequest.Header.Add(\"accept\", \"application/json\")\n\tclient := http.Client{}\n\tclient.Do(request)\n\n\tdefer request.Body.Close()\n\n\tbody, errReading := ioutil.ReadAll(request.Body)\n\tif errReading != nil {\n\t\tlog.Fatal(errReading)\n\t}\n\n\n\tvar userDetails map[string]interface{}\n\terrJSON := json.Unmarshal(body, &userDetails)\n\tif errJSON != nil {\n\t\tlog.Fatal(errJSON)\n\t}\n\tfmt.Println(userDetails)\n}", "func GetLogInUser(key string, result interface{}, request *http.Request) (err error) {\r\n\tbytes := []byte(request.Header.Get(key))\r\n\r\n\terr = json.Unmarshal(bytes, result)\r\n\tif err != nil {\r\n\t\tLogger.Println(\"Not able to find the Login User!!\")\r\n\t\t// http.Redirect(writer, request, \"/\", 302)\r\n\t\t// return\r\n\t}\r\n\r\n\treturn\r\n}", "func getUser(w http.ResponseWriter, r *http.Request){\n\n\t\tu := User{}\n\t\tu.Name = chi.URLParam(r, \"name\")\n\t\n\t\t//checks if user already exists\n\t\tuser := userExist(u.Name)\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(200)\n\t\tjson.NewEncoder(w).Encode(user)\n\n}", "func LoginGET(w http.ResponseWriter, r *http.Request) {\n\tsess := model.Instance(r)\n\tv := view.New(r)\n\tv.Name = \"login/login\"\n\tv.Vars[\"token\"] = csrfbanana.Token(w, r, sess)\n\t// Refill any form fields\n\tview.Repopulate([]string{\"cuenta\",\"password\"}, r.Form, v.Vars)\n\tv.Render(w)\n }", "func login () *httptest.ResponseRecorder {\n\treq, _ := http.NewRequest(\"GET\", \"/api/v.0.0.1/get-token?uname=\" + USER_NAME + \"&upass=xxx\", nil)\n\tresponse := executeRequest(req)\n\treturn response\n}", "func getPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Println(\"GET HIT\")\n\tvar persons []Person\n\tresult, err := db.Query(\"SELECT * FROM Persons\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer result.Close()\n\tfor result.Next() {\n\t\tvar person Person\n\t\terr := result.Scan(&person.Age, &person.Name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpersons = append(persons, person)\n\t}\n\tfmt.Println(\"Response from db\", persons)\n\tjson.NewEncoder(w).Encode(persons)\n}", "func getUserData(w http.ResponseWriter, r *http.Request) {\n\t// Get request query value.\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tfmt.Fprintf(w, \"invalid_http_method\")\n\t\treturn\n\t}\n\tinputEmail := r.URL.Query().Get(\"email\")\n\n\tif inputEmail == \"\" {\n\t\thttp.Error(w, \"Can't get value.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check from database.\n\tqueryStr := \"SELECT * FROM user WHERE email='\" + inputEmail + \"';\"\n\trets, err := dbconn.DBConn.Query(queryStr)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Send back json data.\n\tfor rets.Next() {\n\t\tvar userdata database.UserType\n\t\tif err = rets.Scan(&userdata.ID, &userdata.Name, &userdata.Email, &userdata.Passwd); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t\t}\n\n\t\tuserJson, err := json.Marshal(userdata)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(userJson)\n\t}\n}", "func LoginGET(c *gin.Context) {\n\tc.HTML(http.StatusOK, \"login.html\", gin.H{})\n}", "func (h *handler) Get(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tusername, err := request.UsernameOf(r)\n\tif err != nil {\n\t\thttperror.InternalServerError(w, err)\n\t\treturn\n\t}\n\n\tdaoAccount := h.app.Dao.Account() // domain/repository の取得\n\taccount, err := daoAccount.FindByUsername(ctx, username)\n\n\tif err != nil {\n\t\thttperror.InternalServerError(w, err)\n\t\treturn\n\t}\n\n\tif account == nil {\n\t\terr := errors.New(\"user not found\")\n println(err.Error())\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif err := json.NewEncoder(w).Encode(account); err != nil {\n\t\thttperror.InternalServerError(w, err)\n\t\treturn\n\t}\n}", "func getPerson(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tparams := mux.Vars(r)\n\tfor _, item := range people {\n\t\tif item.Socialsecurity == params[\"socialsecurity\"] {\n\t\t\tjson.NewEncoder(w).Encode(item)\n\t\t\treturn\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(people)\n}", "func get(u string, client *http.Client, username, password string) ([]byte, error) {\n\treq, err := http.NewRequest(http.MethodGet, u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif username != \"\" {\n\t\treq.SetBasicAuth(username, password)\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"%s: %s\", u, resp.Status)\n\t}\n\n\treturn io.ReadAll(resp.Body)\n}", "func GetPerson(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\tvar person User\n\tif err := db.Where(\"id = ?\", id).First(&person).Error; err != nil {\n\t\tc.AbortWithStatus(404)\n\t\tfmt.Println(err)\n\t} else {\n\t\tc.Header(\"access-control-allow-origin\", \"*\") // Why am I doing this? Find out. 
Try running with this line commented\n\t\tc.JSON(200, person)\n\t}\n}", "func GetPerson(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\tvar person model.Person\n\n\t// Fetch user from db.\n\tif id := params[\"id\"]; len(id) > 0 {\n\t\tid, err := strconv.Atoi(id)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"can not convert from string to int: %v\\n\", err)\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(errors.ErrorMsg{\"json decode failed\"})\n\t\t\treturn\n\t\t}\n\n\t\tvar db = database.DB()\n\n\t\tq := db.First(&person, id)\n\t\tif q.RecordNotFound() {\n\t\t\tfmt.Printf(\"record not found: %v\\n\", err)\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\tjson.NewEncoder(w).Encode(errors.ErrorMsg{\"record not found\"})\n\t\t\treturn\n\t\t} else if q.Error != nil {\n\t\t\tfmt.Printf(\"can not convert from string to int: %v\\n\", err)\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tjson.NewEncoder(w).Encode(errors.ErrorMsg{\"json decode failed\"})\n\t\t\treturn\n\t\t}\n\n\t\t// Success\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(&person)\n\t}\n\n}", "func SearchUserT(resp http.ResponseWriter, req *http.Request) {\n\tfirstName, ok := req.URL.Query()[\"firstname\"]\n\tif !ok {\n\t\tfmt.Println(\"Url Param 'firstname' is missing\")\n\t}\n\n\tsecondName, ok := req.URL.Query()[\"secondname\"]\n\tif !ok {\n\t\tfmt.Println(\"Url Param 'secondname' is missing\")\n\t}\n\t//fmt.Println(firstName, secondName)\n\n\tusers := model.TarantoolUserSearch(svc.Tarantool, firstName[0], secondName[0])\n\n\tjs, err := json.Marshal(users)\n\tif err != nil {\n\t\tfmt.Println(\"Users marshalling error\")\n\t}\n\n\tresp.Write(js)\n}", "func getSpecificPersons(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Println(\"Get Specific HIT\")\n\tparams := mux.Vars(r)\n\tresult, err := db.Query(\"SELECT pAge,pName FROM Persons WHERE pAge >= ?\", params[\"age\"])\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdefer result.Close()\n\tvar pers []Person\n\tfor result.Next() {\n\t\tvar per Person\n\t\terr := result.Scan(&per.Age, &per.Name)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\tpers = append(pers, per)\n\t}\n\tjson.NewEncoder(w).Encode(pers)\n}", "func getLoginInfo(r *http.Request) *LoginInfo {\n\tsession, _ := store.Get(r, SessionName)\n\temail := getSessionValue(session, EmailKey)\n\tname := getSessionValue(session, NameKey)\n\treturn &LoginInfo{email, name}\n}", "func (a *noAuth) Login(w http.ResponseWriter, r *http.Request) {}", "func authenticatedGet(geturl, token string) (*http.Response, error) {\n\ttokenHdr := fmt.Sprintf(\"Token token=%s\", token)\n\n\treq, err := http.NewRequest(\"GET\", geturl, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application/vnd.pagerduty+json;version=2\")\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Authorization\", tokenHdr)\n\n\tclient := &http.Client{}\n\tr, err := client.Do(req)\n\n\treturn r, err\n}", "func UsersLoginGet(c buffalo.Context) error {\n\treturn c.Render(200, r.HTML(\"users/login\"))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
LoginGET displays the login page
func LoginGET(w http.ResponseWriter, r *http.Request) {
	sess := model.Instance(r)
	v := view.New(r)
	v.Name = "login/login"
	v.Vars["token"] = csrfbanana.Token(w, r, sess)
	// Refill any form fields
	view.Repopulate([]string{"cuenta", "password"}, r.Form, v.Vars)
	v.Render(w)
}
[ "func LoginGET(c *gin.Context) {\n\tc.HTML(http.StatusOK, \"login.html\", gin.H{})\n}", "func (m *Repository) GetLogin(w http.ResponseWriter, r *http.Request) {\n\tif m.App.Session.Exists(r.Context(), \"user_id\") {\n\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\trender.Template(w, r, \"login.page.tmpl\", &models.TemplateData{\n\t\tForm: forms.New(nil),\n\t})\n}", "func login(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tif _, err := data.CheckSession(r); err == nil {\n\t\tgenerateHTML(w, nil, []string{\"login.layout\", \"private.navbar\", \"login\"})\n\t} else {\n\t\tgenerateHTML(w, nil, []string{\"login.layout\", \"public.navbar\", \"login\"})\n\t}\n}", "func GetLogin(w http.ResponseWriter, req *http.Request, app *App) {\n\tif models.UserCount(app.Db) > 0 {\n\t\trender(w, \"admin/login\", map[string]interface{}{\"hideNav\": true}, app)\n\t} else {\n\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/register\", http.StatusSeeOther)\n\t}\n\n}", "func Login(w http.ResponseWriter, r *http.Request) {\n\n\tpageVars := PageVars{}\n\taddPageVars(r, &pageVars)\n\trender(w, \"login\", pageVars)\n}", "func (a *noAuth) Login(w http.ResponseWriter, r *http.Request) {}", "func (h *Handler) HandleAdminLoginGET(w http.ResponseWriter, r *http.Request) {\n\tviewArgs := map[string]interface{}{}\n\trenderHTML(w, \"admin-login.html\", viewArgs)\n\treturn\n}", "func login(w http.ResponseWriter, r *http.Request) {\n\tcxt := loginContext{}\n\n\tif r.PostFormValue(\"username\") != \"\" {\n\t\tuser := r.PostFormValue(\"username\")\n\t\tpass := r.PostFormValue(\"password\")\n\t\tfmt.Printf(\"[INFO] Login request: username=%s, password=%s\\n\", user, pass)\n\t\tif validateLogin(user, pass) {\n\t\t\tlogmein(user, pass)\n\t\t\tshow(w, r)\n\t\t} else {\n\t\t\tfmt.Printf(\"[INFO] login rejected: username=%s, password=%s\\n\",\n\t\t\t\tuser, pass)\n\t\t}\n\t}\n\n\tfmt.Println(\"[INFO] Rendering template: login.html\")\n\ttemplates.ExecuteTemplate(w, \"login.html\", &cxt)\n}", "func UsersLoginGet(c buffalo.Context) error {\n\treturn c.Render(200, r.HTML(\"users/login\"))\n}", "func (h *Handler) LoginGet(relativePath string, f ActionFunc) {\n\th.pushAction(GET, relativePath, f, true)\n}", "func showLoginPage(c *gin.Context) {\n\trender(c, gin.H{\n\t\t\"title\": \"Login\",\n\t\t\"ErrorMessage\": \"\",\n\t}, \"login.html\")\n}", "func (c Control) ServeLogin(w http.ResponseWriter, r *http.Request) {\n\ttemplate := map[string]interface{}{\"error\": false}\n\tif r.Method == http.MethodPost {\n\t\t// Get their submitted hash and the real hash.\n\t\tpassword := r.PostFormValue(\"password\")\n\t\thash := HashPassword(password)\n\t\tc.Config.RLock()\n\t\trealHash := c.Config.AdminHash\n\t\tc.Config.RUnlock()\n\t\t// Check if they got the password correct.\n\t\tif hash == realHash {\n\t\t\ts, _ := Store.Get(r, \"sessid\")\n\t\t\ts.Values[\"authenticated\"] = true\n\t\t\ts.Save(r, w)\n\t\t\thttp.Redirect(w, r, \"/\", http.StatusTemporaryRedirect)\n\t\t\treturn\n\t\t}\n\t\ttemplate[\"error\"] = true\n\t}\n\n\t// Serve login page with no template.\n\tdata, err := Asset(\"templates/login.mustache\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tcontent := mustache.Render(string(data), template)\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.Write([]byte(content))\n}", "func Login(w http.ResponseWriter, r *http.Request) {\n\tconditionsMap := map[string]interface{}{}\n\tsession, err 
:= loggedUserSession.Get(r, \"authenticated-user-session\")\n\tif err != nil {\n\t\tlog.Println(\"Não há dados na sessão login\", err)\n\t}\n\n\tlog.Println(\"Session name : \", session.Name())\n\tlog.Println(\"Username Login : \", session.Values[\"username\"])\n\tconditionsMap[\"Username\"] = session.Values[\"username\"]\n\n\tif session.Values[\"tipoAcesso\"] == \"adm\" {\n\t\tconditionsMap[\"tipoAcesso\"] = session.Values[\"tipoAcesso\"]\n\t}\n\n\tif conditionsMap[\"Username\"] != \"\" && conditionsMap[\"Username\"] != nil {\n\t\ttmpl.ExecuteTemplate(w, \"login.html\", conditionsMap)\n\t} else {\n\t\ttmpl.ExecuteTemplate(w, \"login.html\", nil)\n\t}\n}", "func (s TppHTTPServer) Login(w http.ResponseWriter, r *http.Request, p httprouter.Params) {\n\tdata := LoginPageData{PageTitle: \"AXA Pay Bank Login\", BankID: s.BankID, PIN: s.PIN}\n\t//fmt.Fprint(w, \"TPP server reference PISP Login\\n\")\n\ttmpl := template.Must(template.ParseFiles(\"api/login.html\"))\n\tr.ParseForm()\n\tlog.Println(\"Form:\", r.Form)\n\ttmpl.Execute(w, data)\n\n}", "func Login(ctx echo.Context) error {\n\n\tf := forms.New(utils.GetLang(ctx))\n\tutils.SetData(ctx, \"form\", f)\n\n\t// set page tittle to login\n\tutils.SetData(ctx, settings.PageTitle, \"login\")\n\n\treturn ctx.Render(http.StatusOK, tmpl.LoginTpl, utils.GetData(ctx))\n}", "func (h *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) {\n\n\tchallenge, err := readURLChallangeParams(r, \"login\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif r.Method == \"POST\" {\n\t\tif r.Form == nil {\n\t\t\tif err := r.ParseForm(); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tuserName := r.Form.Get(\"username\")\n\t\tpassword := r.Form.Get(\"password\")\n\t\tloginChallenge := r.Form.Get(\"challenge\")\n\t\tpass, err := h.LoginService.CheckPasswords(userName, password)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tif pass {\n\n\t\t\tacceptLoginBody := h.ConfigService.FetchAcceptLoginConfig(userName)\n\t\t\trawJson, err := json.Marshal(acceptLoginBody)\n\n\t\t\tredirectURL, err := h.LoginService.SendAcceptBody(\"login\", loginChallenge, rawJson)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\t\t}\n\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\ttemplLogin := template.Must(template.ParseFiles(\"templates/login.html\"))\n\t\tloginData := h.ConfigService.FetchLoginConfig(challenge, true)\n\t\ttemplLogin.Execute(w, loginData)\n\t} else {\n\t\tchallengeBody, err := h.LoginService.ReadChallenge(challenge, \"login\")\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Print(err)\n\t\t}\n\n\t\tif !challengeBody.Skip {\n\t\t\ttemplLogin := template.Must(template.ParseFiles(\"templates/login.html\"))\n\t\t\tloginData := h.ConfigService.FetchLoginConfig(challenge, false)\n\t\t\ttemplLogin.Execute(w, loginData)\n\t\t} else {\n\n\t\t\tacceptLoginBody := h.ConfigService.FetchAcceptLoginConfig(challengeBody.Subject)\n\t\t\trawJson, err := json.Marshal(acceptLoginBody)\n\n\t\t\tredirectURL, err := h.LoginService.SendAcceptBody(\"login\", challenge, rawJson)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\thttp.Redirect(w, r, redirectURL, http.StatusFound)\n\n\t\t}\n\t}\n}", "func login(w 
http.ResponseWriter, r *http.Request) {\n clearCache(w)\n exists, _ := getCookie(r, LOGIN_COOKIE)\n if exists {\n http.Redirect(w, r, \"/home\", http.StatusSeeOther)\n return\n }\n if r.Method == http.MethodGet {\n LOG[INFO].Println(\"Login Page\")\n http.ServeFile(w, r, \"../../web/login.html\")\n } else if r.Method == http.MethodPost {\n LOG[INFO].Println(\"Executing Login\")\n r.ParseForm()\n LOG[INFO].Println(\"Form Values: Username\", r.PostFormValue(\"username\"))\n passhash := sha512.Sum512([]byte(r.PostFormValue(\"password\")))\n LOG[INFO].Println(\"Hex Encoded Passhash:\", hex.EncodeToString(passhash[:]))\n response := sendCommand(CommandRequest{CommandLogin, struct{\n Username string\n Password string\n }{\n r.PostFormValue(\"username\"),\n hex.EncodeToString(passhash[:]),\n }})\n if response == nil {\n http.SetCookie(w, genCookie(ERROR_COOKIE, \"Send Command Error\"))\n http.Redirect(w, r, \"/error\", http.StatusSeeOther)\n return\n }\n if !response.Success {\n LOG[WARNING].Println(StatusText(response.Status))\n http.Redirect(w, r, \"/login\", http.StatusSeeOther)\n return\n }\n\n LOG[INFO].Println(\"Successfully Logged In\")\n http.SetCookie(w, genCookie(LOGIN_COOKIE, r.PostFormValue(\"username\")))\n http.Redirect(w, r, \"/home\", http.StatusSeeOther)\n }\n}", "func logIn(res http.ResponseWriter, req *http.Request) {\n\t// retrive the name form URL\n\tname := req.FormValue(\"name\")\n\tname = html.EscapeString(name)\n\tif name != \"\" {\n\t\tuuid := generateUniqueId()\n\t\tsessionsSyncLoc.Lock()\n\t\tsessions[uuid] = name\n\t\tsessionsSyncLoc.Unlock()\n\n\t\t// save uuid in the cookie\n\t\tcookie := http.Cookie{Name: \"uuid\", Value: uuid, Path: \"/\"}\n\t\thttp.SetCookie(res, &cookie)\n\n\t\t// redirect to /index.html endpoint\n\t\thttp.Redirect(res, req, \"/index.html\", http.StatusFound)\n\t} else {\n\t\t// if the provided input - name is empty, display this message\n\t\tres.Header().Set(\"Content-Type\", \"text/html\")\n\t\tfmt.Fprintf(\n\t\t\tres,\n\t\t\t`<html>\n\t\t\t<body>\n\t\t\t<form action=\"login\">\n\t\t\t C'mon, I need a name.\n\t\t\t</form>\n\t\t\t</p>\n\t\t\t</body>\n\t\t\t</html>`,\n\t\t)\n\t}\n}", "func UserLogin(w http.ResponseWriter, r *http.Request) {\n\t//log.Println(r.Method)\n\tvar err error\n\tif r.Method != http.MethodPost {\n\t\tt := template.Must(template.ParseFiles(\n\t\t\t\"/home/autotest/go/src/github.com/600lyy/accountservice/templates/html/login/home.html\"))\n\t\tt.Execute(w, nil)\n\t\treturn\n\t}\n\n\tr.ParseForm()\n\tlog.Println(r.Form)\n\tusername := r.Form[\"username\"][0]\n\t\n\tif username != \"\" {\n\t\taccount, _ := DBClient.QueryAccount(username)\n\t\tpassword := r.Form[\"password\"][0]\n\t\tif password == account.Passwd {\n\t\t\terr = nil\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"authentication failed, check user password\")\n\t\t}\n\t}\n\t\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(err.Error())))\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new JWK for the desired type. An error will be returned if an invalid type is passed
func NewJwk(kty string) (j *Jwk, err error) {
	switch kty {
	case KeyTypeOct, KeyTypeRSA, KeyTypeEC:
		j = &Jwk{Type: kty}
	default:
		err = errors.New("Key Type Invalid. Must be Oct, RSA or EC")
	}
	return
}
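A short usage sketch (assumes a "log" import; the key-ID value is illustrative):

func newJwkExample() {
	j, err := NewJwk(KeyTypeEC)
	if err != nil {
		log.Fatal(err)
	}
	j.Id = "example-key-1" // becomes the "kid" member when marshaled

	// An unknown type yields the error instead of a key.
	if _, err := NewJwk("PQC"); err != nil {
		log.Println(err) // "Key Type Invalid. Must be Oct, RSA or EC"
	}
}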
[ "func NewJWK(jwk map[string]interface{}) JWK {\n\treturn jwk\n}", "func (pk PublicKey) JWK() JWK {\n\tentry, ok := pk[JwkProperty]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tjson, ok := entry.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn NewJWK(json)\n}", "func MustNewKeyWithType(input string) KeyWithType {\n\tkwt, err := NewKeyWithType(input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn kwt\n}", "func New(key interface{}) (Key, error) {\n\tif key == nil {\n\t\treturn nil, errors.New(\"jwk.New requires a non-nil key\")\n\t}\n\n\tswitch v := key.(type) {\n\tcase *rsa.PrivateKey:\n\t\treturn newRSAPrivateKey(v)\n\tcase *rsa.PublicKey:\n\t\treturn newRSAPublicKey(v)\n\tcase *ecdsa.PrivateKey:\n\t\treturn newECDSAPrivateKey(v)\n\tcase *ecdsa.PublicKey:\n\t\treturn newECDSAPublicKey(v)\n\tcase []byte:\n\t\treturn newSymmetricKey(v)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid key type %T\", key)\n\t}\n}", "func (a *ACMEInstance) CreateJWK() (jwk JWK) {\n\n\t//TODO Mode Switch for different crypto Primitives...\n\tvar pkN *big.Int = (a.serverKey.PublicKey.(rsa.PublicKey)).N //ist vom type *big.Int... habe es absichtlich explizit hingeschrieben\n\tvar pkE int = (a.serverKey.PublicKey.(rsa.PublicKey)).E //type int\n\n\tpkNencoded := base64.RawURLEncoding.EncodeToString(pkN.Bytes())\n\tpkEencoded := base64.RawURLEncoding.EncodeToString(big.NewInt(int64(pkE)).Bytes())\n\n\tjwk = JWK{E: pkEencoded,\n\t\tKty: \"RSA\",\n\t\tN: pkNencoded,\n\t}\n\n\treturn jwk\n}", "func AsJWK(key interface{}) (*jose.JsonWebKey, error) {\n\tJWK := jose.JsonWebKey{\n\t\tKey: key,\n\t\tAlgorithm: string(jose.RSA1_5),\n\t}\n\tthumbprint, err := JWK.Thumbprint(crypto.SHA256)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tJWK.KeyID = base64.URLEncoding.EncodeToString(thumbprint)\n\treturn &JWK, nil\n}", "func KeyTypeToJWA(keyType kms.KeyType) string {\n\treturn kmssigner.KeyTypeToJWA(keyType)\n}", "func NewKeyWithType(input string) (KeyWithType, error) {\n\tparts := strings.Split(input, \":\")\n\tif len(parts) != 2 {\n\t\treturn KeyWithType{}, fmt.Errorf(\"key must be of the form <algorithm>:<key in base64>, was: %s\", input)\n\t}\n\n\tkeyBytes, err := base64.StdEncoding.DecodeString(parts[1])\n\tif err != nil {\n\t\treturn KeyWithType{}, fmt.Errorf(\"failed to base64-decode key: %v\", err)\n\t}\n\n\tif parts[0] == \"RSA\" {\n\t\tif privKey, err := RSAPrivateKeyFromBytes(keyBytes); err == nil {\n\t\t\t// legacy private key\n\t\t\treturn privKey, nil\n\t\t} else if pubKey, err := RSAPublicKeyFromBytes(keyBytes); err == nil {\n\t\t\t// legacy public key\n\t\t\treturn pubKey, nil\n\t\t}\n\n\t\t// could not parse legacy key\n\t\treturn KeyWithType{}, fmt.Errorf(\"unable to parse legacy RSA key\")\n\t}\n\n\talg, err := ToKeyType(parts[0])\n\tif err != nil {\n\t\treturn KeyWithType{}, err\n\t}\n\treturn alg.Generator()(keyBytes)\n}", "func New(key interface{}) (Key, error) {\n\tif key == nil {\n\t\treturn nil, errors.New(`jwk.New requires a non-nil key`)\n\t}\n\n\tvar ptr interface{}\n\tswitch v := key.(type) {\n\tcase rsa.PrivateKey:\n\t\tptr = &v\n\tcase rsa.PublicKey:\n\t\tptr = &v\n\tcase ecdsa.PrivateKey:\n\t\tptr = &v\n\tcase ecdsa.PublicKey:\n\t\tptr = &v\n\tdefault:\n\t\tptr = v\n\t}\n\n\tswitch rawKey := ptr.(type) {\n\tcase *rsa.PrivateKey:\n\t\tk := NewRSAPrivateKey()\n\t\tif err := k.FromRaw(rawKey); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, `failed to initialize %T from %T`, k, rawKey)\n\t\t}\n\t\treturn k, nil\n\tcase *rsa.PublicKey:\n\t\tk := NewRSAPublicKey()\n\t\tif err := 
k.FromRaw(rawKey); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, `failed to initialize %T from %T`, k, rawKey)\n\t\t}\n\t\treturn k, nil\n\tcase *ecdsa.PrivateKey:\n\t\tk := NewECDSAPrivateKey()\n\t\tif err := k.FromRaw(rawKey); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, `failed to initialize %T from %T`, k, rawKey)\n\t\t}\n\t\treturn k, nil\n\tcase *ecdsa.PublicKey:\n\t\tk := NewECDSAPublicKey()\n\t\tif err := k.FromRaw(rawKey); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, `failed to initialize %T from %T`, k, rawKey)\n\t\t}\n\t\treturn k, nil\n\tcase []byte:\n\t\tk := NewSymmetricKey()\n\t\tif err := k.FromRaw(rawKey); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, `failed to initialize %T from %T`, k, rawKey)\n\t\t}\n\t\treturn k, nil\n\tdefault:\n\t\treturn nil, errors.Errorf(`invalid key type '%T' for jwk.New`, key)\n\t}\n}", "func NewJwk() (DiscoveryResponse, error) {\n\tc, _ := NewConfig()\n\tdiscoveryURI := strings.TrimSuffix(c.Issuer, \"/\") + \"/.well-known/openid-configuration\"\n\n\tclient := http.Client{\n\t\tTimeout: 10 * time.Second,\n\t}\n\n\tres, err := client.Get(discoveryURI)\n\tif err != nil {\n\t\treturn DiscoveryResponse{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tvar discoResp struct {\n\t\tJwkURI string `json:\"jwks_uri\"`\n\t\tRevocationEndpoint string `json:\"revocation_endpoint\"`\n\t}\n\n\tif err := json.NewDecoder(res.Body).Decode(&discoResp); err != nil {\n\t\treturn DiscoveryResponse{}, err\n\t}\n\n\tres, err = client.Get(discoResp.JwkURI)\n\tif err != nil {\n\t\treturn DiscoveryResponse{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tvar keySet jose.JSONWebKeySet\n\tif err := json.NewDecoder(res.Body).Decode(&keySet); err != nil {\n\t\treturn DiscoveryResponse{}, err\n\t}\n\n\treturn DiscoveryResponse{\n\t\tRevocationEndpoint: discoResp.RevocationEndpoint,\n\t\tJSONWebKeySet: &keySet,\n\t}, nil\n}", "func ToJWK(key interface{}) (*Key, error) {\n\tswitch k := key.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\tkey := k.Public().(*ecdsa.PublicKey)\n\t\treturn buildJWKFromECDSA(key)\n\tcase *ecdsa.PublicKey:\n\t\treturn buildJWKFromECDSA(k)\n\tcase *rsa.PrivateKey:\n\t\tkey := k.Public().(*rsa.PublicKey)\n\t\treturn buildJWKFromRSA(key)\n\tcase *rsa.PublicKey:\n\t\treturn buildJWKFromRSA(k)\n\tdefault:\n\t\treturn nil, acmecrypto.ErrKeyFormat\n\t}\n}", "func (jwk *Jwk) MarshalJSON() (data []byte, err error) {\n\n\t// Remove any potentionally conflicting claims from the JWK's additional members\n\tdelete(jwk.AdditionalMembers, \"kty\")\n\tdelete(jwk.AdditionalMembers, \"kid\")\n\tdelete(jwk.AdditionalMembers, \"alg\")\n\tdelete(jwk.AdditionalMembers, \"use\")\n\tdelete(jwk.AdditionalMembers, \"key_ops\")\n\tdelete(jwk.AdditionalMembers, \"crv\")\n\tdelete(jwk.AdditionalMembers, \"x\")\n\tdelete(jwk.AdditionalMembers, \"y\")\n\tdelete(jwk.AdditionalMembers, \"d\")\n\tdelete(jwk.AdditionalMembers, \"n\")\n\tdelete(jwk.AdditionalMembers, \"p\")\n\tdelete(jwk.AdditionalMembers, \"q\")\n\tdelete(jwk.AdditionalMembers, \"dp\")\n\tdelete(jwk.AdditionalMembers, \"dq\")\n\tdelete(jwk.AdditionalMembers, \"qi\")\n\tdelete(jwk.AdditionalMembers, \"e\")\n\tdelete(jwk.AdditionalMembers, \"oth\")\n\tdelete(jwk.AdditionalMembers, \"k\")\n\n\t// There are additional claims, individually marshal each member\n\tobj := make(map[string]*json.RawMessage, len(jwk.AdditionalMembers)+10)\n\n\tif bytes, err := json.Marshal(jwk.Type); err == nil {\n\t\trm := json.RawMessage(bytes)\n\t\tobj[\"kty\"] = &rm\n\t} else {\n\t\treturn nil, err\n\t}\n\n\tif len(jwk.Id) > 0 {\n\t\tif bytes, 
err := json.Marshal(jwk.Id); err == nil {\n\t\t\trm := json.RawMessage(bytes)\n\t\t\tobj[\"kid\"] = &rm\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif len(jwk.Algorithm) > 0 {\n\t\tif bytes, err := json.Marshal(jwk.Algorithm); err == nil {\n\t\t\trm := json.RawMessage(bytes)\n\t\t\tobj[\"alg\"] = &rm\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif len(jwk.Use) > 0 {\n\t\tif bytes, err := json.Marshal(jwk.Use); err == nil {\n\t\t\trm := json.RawMessage(bytes)\n\t\t\tobj[\"use\"] = &rm\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif len(jwk.Operations) > 0 {\n\t\tif bytes, err := json.Marshal(jwk.Operations); err == nil {\n\t\t\trm := json.RawMessage(bytes)\n\t\t\tobj[\"key_ops\"] = &rm\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tswitch jwk.Type {\n\tcase KeyTypeEC:\n\t\t{\n\t\t\tif jwk.Curve != nil {\n\t\t\t\tjwk.Curve.Params()\n\t\t\t\tp := jwk.Curve.Params()\n\t\t\t\tif bytes, err := json.Marshal(p.Name); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"crv\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.X != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.X}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"x\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.Y != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.Y}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"y\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.D != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.D}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"d\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase KeyTypeRSA:\n\t\t{\n\t\t\tif jwk.D != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.D}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"d\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif jwk.N != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.N}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"n\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.P != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.P}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"p\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.Q != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.Q}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"q\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.Dp != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.Dp}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"dp\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.Dq != nil {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: jwk.Dq}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"dq\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.Qi != nil {\n\t\t\t\tb64u := 
&Base64UrlUInt{UInt: jwk.Qi}\n\t\t\t\tif bytes, err := json.Marshal(b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"qi\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif jwk.E >= 0 {\n\t\t\t\tb64u := &Base64UrlUInt{UInt: big.NewInt(int64(jwk.E))}\n\t\t\t\tif bytes, err := json.Marshal(&b64u); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"e\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(jwk.OtherPrimes) > 0 {\n\t\t\t\ttempOthPrimes := make([]jwkOthPrimeJSON, len(jwk.OtherPrimes))\n\t\t\t\tfor i, v := range jwk.OtherPrimes {\n\t\t\t\t\ttempOthPrimes[i].Coeff = &Base64UrlUInt{UInt: v.Coeff}\n\t\t\t\t\ttempOthPrimes[i].Exp = &Base64UrlUInt{UInt: v.Exp}\n\t\t\t\t\ttempOthPrimes[i].R = &Base64UrlUInt{UInt: v.R}\n\t\t\t\t}\n\n\t\t\t\tif bytes, err := json.Marshal(tempOthPrimes); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"oth\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase KeyTypeOct:\n\t\t{\n\t\t\tif len(jwk.KeyValue) > 0 {\n\t\t\t\tb64o := &Base64UrlOctets{Octets: jwk.KeyValue}\n\t\t\t\tif bytes, err := json.Marshal(b64o); err == nil {\n\t\t\t\t\trm := json.RawMessage(bytes)\n\t\t\t\t\tobj[\"k\"] = &rm\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\t//Iterate through remaing members and add to json rawMessage\n\tfor k, v := range jwk.AdditionalMembers {\n\t\tif bytes, err := json.Marshal(v); err == nil {\n\t\t\trm := json.RawMessage(bytes)\n\t\t\tobj[k] = &rm\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Marshal obj\n\treturn json.Marshal(obj)\n}", "func CreateJWTSigningKey(algorithm string, key any) (JWTSigningKey, error) {\n\tvar signingMethod jwt.SigningMethod\n\tswitch algorithm {\n\tcase \"HS256\":\n\t\tsigningMethod = jwt.SigningMethodHS256\n\tcase \"HS384\":\n\t\tsigningMethod = jwt.SigningMethodHS384\n\tcase \"HS512\":\n\t\tsigningMethod = jwt.SigningMethodHS512\n\n\tcase \"RS256\":\n\t\tsigningMethod = jwt.SigningMethodRS256\n\tcase \"RS384\":\n\t\tsigningMethod = jwt.SigningMethodRS384\n\tcase \"RS512\":\n\t\tsigningMethod = jwt.SigningMethodRS512\n\n\tcase \"ES256\":\n\t\tsigningMethod = jwt.SigningMethodES256\n\tcase \"ES384\":\n\t\tsigningMethod = jwt.SigningMethodES384\n\tcase \"ES512\":\n\t\tsigningMethod = jwt.SigningMethodES512\n\tcase \"EdDSA\":\n\t\tsigningMethod = jwt.SigningMethodEdDSA\n\tdefault:\n\t\treturn nil, ErrInvalidAlgorithmType{algorithm}\n\t}\n\n\tswitch signingMethod.(type) {\n\tcase *jwt.SigningMethodEd25519:\n\t\tprivateKey, ok := key.(ed25519.PrivateKey)\n\t\tif !ok {\n\t\t\treturn nil, jwt.ErrInvalidKeyType\n\t\t}\n\t\treturn newEdDSASingingKey(signingMethod, privateKey)\n\tcase *jwt.SigningMethodECDSA:\n\t\tprivateKey, ok := key.(*ecdsa.PrivateKey)\n\t\tif !ok {\n\t\t\treturn nil, jwt.ErrInvalidKeyType\n\t\t}\n\t\treturn newECDSASingingKey(signingMethod, privateKey)\n\tcase *jwt.SigningMethodRSA:\n\t\tprivateKey, ok := key.(*rsa.PrivateKey)\n\t\tif !ok {\n\t\t\treturn nil, jwt.ErrInvalidKeyType\n\t\t}\n\t\treturn newRSASingingKey(signingMethod, privateKey)\n\tdefault:\n\t\tsecret, ok := key.([]byte)\n\t\tif !ok {\n\t\t\treturn nil, jwt.ErrInvalidKeyType\n\t\t}\n\t\treturn hmacSigningKey{signingMethod, secret}, nil\n\t}\n}", "func (jwk *Jwk) Validate() error {\n\n\t// If the alg parameter is set, make sure it matches the set JWK Type\n\tif len(jwk.Algorithm) > 0 {\n\t\talgKeyType := GetKeyType(jwk.Algorithm)\n\t\tif 
algKeyType != jwk.Type {\n\t\t\tfmt.Errorf(\"Jwk Type (kty=%v) doesn't match the algorithm key type (%v)\", jwk.Type, algKeyType)\n\t\t}\n\t}\n\tswitch jwk.Type {\n\tcase KeyTypeRSA:\n\t\tif err := jwk.validateRSAParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase KeyTypeEC:\n\t\tif err := jwk.validateECParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase KeyTypeOct:\n\t\tif err := jwk.validateOctParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn errors.New(\"KeyType (kty) must be EC, RSA or Oct\")\n\t}\n\n\treturn nil\n}", "func NewJwkSet(target string) (*JwkSet, error) {\n\tif target == \"\" {\n\t\treturn nil, errors.New(\"invalid empty target url\")\n\t}\n\n\tdata, err := getHttpRespData(target)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseJwksData(data)\n}", "func NewJWT(claims Claims, method crypto.SigningMethod) jwt.JWT {\n\tj, ok := New(claims, method).(*jws)\n\tif !ok {\n\t\tpanic(\"jws.NewJWT: runtime panic: New(...).(*jws) != true\")\n\t}\n\tj.sb[0].protected.Set(\"typ\", \"JWT\")\n\tj.isJWT = true\n\treturn j\n}", "func GetKeyType(typename string) (keytype KeyType) {\n\tswitch {\n\tcase typename == \"none\":\n\t\tkeytype = RT_NONE\n\tcase typename == \"string\":\n\t\tkeytype = RT_STRING\n\tcase typename == \"list\":\n\t\tkeytype = RT_LIST\n\tcase typename == \"set\":\n\t\tkeytype = RT_SET\n\tcase typename == \"zset\":\n\t\tkeytype = RT_ZSET\n\tdefault:\n\t\tpanic(\"BUG - unknown type: \" + string(keytype))\n\t}\n\treturn\n}", "func checkJWSAuthType(jws *jose.JSONWebSignature) (jwsAuthType, *acme.ProblemDetails) {\n\t// checkJWSAuthType is called after parseJWS() which defends against the\n\t// incorrect number of signatures.\n\theader := jws.Signatures[0].Header\n\t// There must not be a Key ID *and* an embedded JWK\n\tif header.KeyID != \"\" && header.JSONWebKey != nil {\n\t\treturn invalidAuthType, acme.MalformedProblem(\"jwk and kid header fields are mutually exclusive\")\n\t} else if header.KeyID != \"\" {\n\t\treturn embeddedKeyID, nil\n\t} else if header.JSONWebKey != nil {\n\t\treturn embeddedJWK, nil\n\t}\n\treturn invalidAuthType, nil\n}", "func mustUnmarshalJWK(s string) *jose.JSONWebKey {\n\tret := &jose.JSONWebKey{}\n\tif err := json.Unmarshal([]byte(s), ret); err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements the json.Marshaler interface and JSON encodes the Jwk
func (jwk *Jwk) MarshalJSON() (data []byte, err error) {

	// Remove any potentially conflicting claims from the JWK's additional members
	delete(jwk.AdditionalMembers, "kty")
	delete(jwk.AdditionalMembers, "kid")
	delete(jwk.AdditionalMembers, "alg")
	delete(jwk.AdditionalMembers, "use")
	delete(jwk.AdditionalMembers, "key_ops")
	delete(jwk.AdditionalMembers, "crv")
	delete(jwk.AdditionalMembers, "x")
	delete(jwk.AdditionalMembers, "y")
	delete(jwk.AdditionalMembers, "d")
	delete(jwk.AdditionalMembers, "n")
	delete(jwk.AdditionalMembers, "p")
	delete(jwk.AdditionalMembers, "q")
	delete(jwk.AdditionalMembers, "dp")
	delete(jwk.AdditionalMembers, "dq")
	delete(jwk.AdditionalMembers, "qi")
	delete(jwk.AdditionalMembers, "e")
	delete(jwk.AdditionalMembers, "oth")
	delete(jwk.AdditionalMembers, "k")

	// Individually marshal each member into a map of raw JSON messages
	obj := make(map[string]*json.RawMessage, len(jwk.AdditionalMembers)+10)

	if bytes, err := json.Marshal(jwk.Type); err == nil {
		rm := json.RawMessage(bytes)
		obj["kty"] = &rm
	} else {
		return nil, err
	}

	if len(jwk.Id) > 0 {
		if bytes, err := json.Marshal(jwk.Id); err == nil {
			rm := json.RawMessage(bytes)
			obj["kid"] = &rm
		} else {
			return nil, err
		}
	}
	if len(jwk.Algorithm) > 0 {
		if bytes, err := json.Marshal(jwk.Algorithm); err == nil {
			rm := json.RawMessage(bytes)
			obj["alg"] = &rm
		} else {
			return nil, err
		}
	}
	if len(jwk.Use) > 0 {
		if bytes, err := json.Marshal(jwk.Use); err == nil {
			rm := json.RawMessage(bytes)
			obj["use"] = &rm
		} else {
			return nil, err
		}
	}
	if len(jwk.Operations) > 0 {
		if bytes, err := json.Marshal(jwk.Operations); err == nil {
			rm := json.RawMessage(bytes)
			obj["key_ops"] = &rm
		} else {
			return nil, err
		}
	}

	switch jwk.Type {
	case KeyTypeEC:
		if jwk.Curve != nil {
			p := jwk.Curve.Params()
			if bytes, err := json.Marshal(p.Name); err == nil {
				rm := json.RawMessage(bytes)
				obj["crv"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.X != nil {
			b64u := &Base64UrlUInt{UInt: jwk.X}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["x"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.Y != nil {
			b64u := &Base64UrlUInt{UInt: jwk.Y}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["y"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.D != nil {
			b64u := &Base64UrlUInt{UInt: jwk.D}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["d"] = &rm
			} else {
				return nil, err
			}
		}
	case KeyTypeRSA:
		if jwk.D != nil {
			b64u := &Base64UrlUInt{UInt: jwk.D}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["d"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.N != nil {
			b64u := &Base64UrlUInt{UInt: jwk.N}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["n"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.P != nil {
			b64u := &Base64UrlUInt{UInt: jwk.P}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["p"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.Q != nil {
			b64u := &Base64UrlUInt{UInt: jwk.Q}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["q"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.Dp != nil {
			b64u := &Base64UrlUInt{UInt: jwk.Dp}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["dp"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.Dq != nil {
			b64u := &Base64UrlUInt{UInt: jwk.Dq}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["dq"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.Qi != nil {
			b64u := &Base64UrlUInt{UInt: jwk.Qi}
			if bytes, err := json.Marshal(b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["qi"] = &rm
			} else {
				return nil, err
			}
		}
		if jwk.E >= 0 {
			b64u := &Base64UrlUInt{UInt: big.NewInt(int64(jwk.E))}
			if bytes, err := json.Marshal(&b64u); err == nil {
				rm := json.RawMessage(bytes)
				obj["e"] = &rm
			} else {
				return nil, err
			}
		}
		if len(jwk.OtherPrimes) > 0 {
			tempOthPrimes := make([]jwkOthPrimeJSON, len(jwk.OtherPrimes))
			for i, v := range jwk.OtherPrimes {
				tempOthPrimes[i].Coeff = &Base64UrlUInt{UInt: v.Coeff}
				tempOthPrimes[i].Exp = &Base64UrlUInt{UInt: v.Exp}
				tempOthPrimes[i].R = &Base64UrlUInt{UInt: v.R}
			}
			if bytes, err := json.Marshal(tempOthPrimes); err == nil {
				rm := json.RawMessage(bytes)
				obj["oth"] = &rm
			} else {
				return nil, err
			}
		}
	case KeyTypeOct:
		if len(jwk.KeyValue) > 0 {
			b64o := &Base64UrlOctets{Octets: jwk.KeyValue}
			if bytes, err := json.Marshal(b64o); err == nil {
				rm := json.RawMessage(bytes)
				obj["k"] = &rm
			} else {
				return nil, err
			}
		}
	}

	// Iterate through the remaining members and add them to the raw-message map
	for k, v := range jwk.AdditionalMembers {
		if bytes, err := json.Marshal(v); err == nil {
			rm := json.RawMessage(bytes)
			obj[k] = &rm
		} else {
			return nil, err
		}
	}

	// Marshal obj
	return json.Marshal(obj)
}
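A quick serialization sketch (assumes "encoding/json", "fmt" imports; the exact string behind KeyTypeOct and the emitted member encoding follow RFC 7517 conventions and are assumptions here):

func marshalExample() {
	j, _ := NewJwk(KeyTypeOct)
	j.KeyValue = []byte("top-secret")
	data, err := json.Marshal(j) // dispatches to jwk.MarshalJSON
	if err == nil {
		fmt.Println(string(data)) // e.g. {"k":"dG9wLXNlY3JldA","kty":"oct"}
	}
}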
[ "func JSONEncoder() Encoder { return jsonEncoder }", "func (k *Key) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + k.Encode() + `\"`), nil\n}", "func (j *jws) Serialize(key interface{}) ([]byte, error) {\n\tif j.isJWT {\n\t\treturn j.Compact(key)\n\t}\n\treturn nil, ErrIsNotJWT\n}", "func EncodeJSON(w io.Writer, i *Iterator) error {\n\tr := make(Record)\n\n\t// Open paren.\n\tif _, err := w.Write([]byte{'['}); err != nil {\n\t\treturn err\n\t}\n\n\tvar c int\n\tenc := json.NewEncoder(w)\n\n\tdelim := []byte{',', '\\n'}\n\n\tfor i.Next() {\n\t\tif c > 0 {\n\t\t\tif _, err := w.Write(delim); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tc++\n\n\t\tif err := i.Scan(r); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := enc.Encode(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Close paren.\n\tif _, err := w.Write([]byte{']'}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (cpk ContainerPartitionKey) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif cpk.Paths != nil {\n\t\tobjectMap[\"paths\"] = cpk.Paths\n\t}\n\tif cpk.Kind != \"\" {\n\t\tobjectMap[\"kind\"] = cpk.Kind\n\t}\n\tif cpk.Version != nil {\n\t\tobjectMap[\"version\"] = cpk.Version\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (keyMarshaler *SSHKeyMarshaler) MarshalJSON() ([]byte, error) {\n\tjsonData := gin.SSHKey{\n\t\tURL: conf.MakeUrl(\"/api/keys?fingerprint=%s\", keyMarshaler.SSHKey.Fingerprint),\n\t\tFingerprint: keyMarshaler.SSHKey.Fingerprint,\n\t\tKey: keyMarshaler.SSHKey.Key,\n\t\tDescription: keyMarshaler.SSHKey.Description,\n\t\tLogin: keyMarshaler.Account.Login,\n\t\tAccountURL: conf.MakeUrl(\"/api/accounts/%s\", keyMarshaler.Account.Login),\n\t\tCreatedAt: keyMarshaler.SSHKey.CreatedAt,\n\t\tUpdatedAt: keyMarshaler.SSHKey.UpdatedAt,\n\t}\n\treturn json.Marshal(jsonData)\n}", "func (kcp KeyCreateParameters) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif kcp.Tags != nil {\n\t\tobjectMap[\"tags\"] = kcp.Tags\n\t}\n\tif kcp.Properties != nil {\n\t\tobjectMap[\"properties\"] = kcp.Properties\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (rec JSONLogRec) MarshalJSONObject(enc *gojay.Encoder) {\n\tif !rec.DisableTimestamp {\n\t\ttimestampFmt := rec.TimestampFormat\n\t\tif timestampFmt == \"\" {\n\t\t\ttimestampFmt = logr.DefTimestampFormat\n\t\t}\n\t\ttime := rec.Time()\n\t\tenc.AddTimeKey(rec.KeyTimestamp, &time, timestampFmt)\n\t}\n\tif !rec.DisableLevel {\n\t\tenc.AddStringKey(rec.KeyLevel, rec.Level().Name)\n\t}\n\tif !rec.DisableMsg {\n\t\tenc.AddStringKey(rec.KeyMsg, rec.Msg())\n\t}\n\tif !rec.DisableContext {\n\t\tctxFields := rec.sorter(rec.Fields())\n\t\tif rec.KeyContextFields != \"\" {\n\t\t\tenc.AddObjectKey(rec.KeyContextFields, jsonFields(ctxFields))\n\t\t} else {\n\t\t\tif len(ctxFields) > 0 {\n\t\t\t\tfor _, cf := range ctxFields {\n\t\t\t\t\tkey := rec.prefixCollision(cf.Key)\n\t\t\t\t\tencodeField(enc, key, cf.Val)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif rec.stacktrace && !rec.DisableStacktrace {\n\t\tframes := rec.StackFrames()\n\t\tif len(frames) > 0 {\n\t\t\tenc.AddArrayKey(rec.KeyStacktrace, stackFrames(frames))\n\t\t}\n\t}\n\n}", "func (v Signature) EncodeJSON(b []byte) []byte {\n\tb = append(b, `{\"hex_bytes\":`...)\n\tb = json.AppendHexBytes(b, v.Bytes)\n\tb = append(b, `,\"public_key\":`...)\n\tb = v.PublicKey.EncodeJSON(b)\n\tb = append(b, ',', '\"', 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', '_', 't', 'y', 'p', 'e', '\"', ':')\n\tb = json.AppendString(b, 
string(v.SignatureType))\n\tb = append(b, ',', '\"', 's', 'i', 'g', 'n', 'i', 'n', 'g', '_', 'p', 'a', 'y', 'l', 'o', 'a', 'd', '\"', ':')\n\tb = v.SigningPayload.EncodeJSON(b)\n\treturn append(b, \"}\"...)\n}", "func (k Kitten) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"eatsMiceYet\", k.EatsMiceYet)\n\tpopulate(objectMap, \"hisses\", k.Hisses)\n\tpopulate(objectMap, \"likesMilk\", k.LikesMilk)\n\tpopulate(objectMap, \"meows\", k.Meows)\n\tpopulate(objectMap, \"name\", k.Name)\n\treturn json.Marshal(objectMap)\n}", "func (a APIKey) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tpopulate(objectMap, \"connectionString\", a.ConnectionString)\n\tpopulate(objectMap, \"id\", a.ID)\n\tpopulateTimeRFC3339(objectMap, \"lastModified\", a.LastModified)\n\tpopulate(objectMap, \"name\", a.Name)\n\tpopulate(objectMap, \"readOnly\", a.ReadOnly)\n\tpopulate(objectMap, \"value\", a.Value)\n\treturn json.Marshal(objectMap)\n}", "func (kv KV) MarshalJSON() ([]byte, error) {\n\tbuffer := bytes.NewBufferString(\"{\")\n\n\tjsonKey, err := json.Marshal(kv.Key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjsonValue, err := json.Marshal(kv.Value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuffer.Write(jsonKey)\n\tbuffer.WriteByte(58)\n\tbuffer.Write(jsonValue)\n\n\tbuffer.WriteString(\"}\")\n\treturn buffer.Bytes(), nil\n}", "func (p PairedKey) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulateAny(objectMap, \"additionalProperties\", p.AdditionalProperties)\n\tpopulate(objectMap, \"id\", p.ID)\n\tpopulate(objectMap, \"type\", p.Type)\n\treturn json.Marshal(objectMap)\n}", "func (k *KeyEnvelope) MarshalJSON() ([]byte, error) {\n\tvar key []byte\n\tif k.KekLabel != \"\" {\n\t\tkey = k.EncryptedKey\n\t} else if k.Key != nil {\n\t\tkey = k.Key[:]\n\t}\n\treturn json.Marshal(struct {\n\t\tKEKLabel string\n\t\tAESKey Buffer\n\t}{\n\t\tKEKLabel: k.KekLabel,\n\t\tAESKey: Buffer(key),\n\t})\n}", "func (gr gelfRecord) MarshalJSONObject(enc *gojay.Encoder) {\n\tenc.AddStringKey(GelfVersionKey, GelfVersion)\n\tenc.AddStringKey(GelfHostKey, gr.getHostname())\n\tenc.AddStringKey(GelfShortKey, gr.Msg())\n\n\tif gr.level.Stacktrace {\n\t\tframes := gr.StackFrames()\n\t\tif len(frames) != 0 {\n\t\t\tvar sbuf strings.Builder\n\t\t\tfor _, frame := range frames {\n\t\t\t\tfmt.Fprintf(&sbuf, \"%s\\n %s:%d\\n\", frame.Function, frame.File, frame.Line)\n\t\t\t}\n\t\t\tenc.AddStringKey(GelfFullKey, sbuf.String())\n\t\t}\n\t}\n\n\tsecs := float64(gr.Time().UTC().Unix())\n\tmillis := float64(gr.Time().Nanosecond() / 1000000)\n\tts := secs + (millis / 1000)\n\tenc.AddFloat64Key(GelfTimestampKey, ts)\n\n\tenc.AddUint32Key(GelfLevelKey, uint32(gr.level.ID))\n\n\tvar fields []logr.Field\n\tif gr.EnableCaller {\n\t\tcaller := logr.Field{\n\t\t\tKey: \"_caller\",\n\t\t\tType: logr.StringType,\n\t\t\tString: gr.LogRec.Caller(),\n\t\t}\n\t\tfields = append(fields, caller)\n\t}\n\n\tfields = append(fields, gr.Fields()...)\n\tif gr.sorter != nil {\n\t\tfields = gr.sorter(fields)\n\t}\n\n\tif len(fields) > 0 {\n\t\tfor _, field := range fields {\n\t\t\tif !strings.HasPrefix(\"_\", field.Key) {\n\t\t\t\tfield.Key = \"_\" + field.Key\n\t\t\t}\n\t\t\tif err := encodeField(enc, field); err != nil {\n\t\t\t\tenc.AddStringKey(field.Key, fmt.Sprintf(\"<error encoding field: %v>\", err))\n\t\t\t}\n\t\t}\n\t}\n}", "func (ka KeyAttributes) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]interface{})\n\tif ka.Enabled != 
nil {\n\t\tobjectMap[\"enabled\"] = ka.Enabled\n\t}\n\tif ka.NotBefore != nil {\n\t\tobjectMap[\"nbf\"] = ka.NotBefore\n\t}\n\tif ka.Expires != nil {\n\t\tobjectMap[\"exp\"] = ka.Expires\n\t}\n\tif ka.Exportable != nil {\n\t\tobjectMap[\"exportable\"] = ka.Exportable\n\t}\n\treturn json.Marshal(objectMap)\n}", "func (q QnAMakerEndpointKeysRequestBody) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"authkey\", q.Authkey)\n\tpopulate(objectMap, \"hostname\", q.Hostname)\n\treturn json.Marshal(objectMap)\n}", "func (s StreamingPolicyContentKeys) MarshalJSON() ([]byte, error) {\n\tobjectMap := make(map[string]any)\n\tpopulate(objectMap, \"defaultKey\", s.DefaultKey)\n\tpopulate(objectMap, \"keyToTrackMappings\", s.KeyToTrackMappings)\n\treturn json.Marshal(objectMap)\n}", "func (v PublicKey) EncodeJSON(b []byte) []byte {\n\tb = append(b, `{\"hex_bytes\":`...)\n\tb = json.AppendHexBytes(b, v.Bytes)\n\tb = append(b, `,\"curve_type\":`...)\n\tb = json.AppendString(b, string(v.CurveType))\n\treturn append(b, \"}\"...)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate checks the JWK object to verify that the parameter set represents a valid JWK. If the JWK is valid, a nil error is returned. If the JWK is invalid, an error is returned describing the values that cause the validation to fail.
func (jwk *Jwk) Validate() error { // If the alg parameter is set, make sure it matches the set JWK Type if len(jwk.Algorithm) > 0 { algKeyType := GetKeyType(jwk.Algorithm) if algKeyType != jwk.Type { return fmt.Errorf("Jwk Type (kty=%v) doesn't match the algorithm key type (%v)", jwk.Type, algKeyType) } } switch jwk.Type { case KeyTypeRSA: if err := jwk.validateRSAParams(); err != nil { return err } case KeyTypeEC: if err := jwk.validateECParams(); err != nil { return err } case KeyTypeOct: if err := jwk.validateOctParams(); err != nil { return err } default: return errors.New("KeyType (kty) must be EC, RSA or Oct") } return nil }
[ "func (pk PublicKey) JWK() JWK {\n\tentry, ok := pk[JwkProperty]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tjson, ok := entry.(map[string]interface{})\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn NewJWK(json)\n}", "func (m *RemoteJwks) Validate() error {\n\treturn m.validate(false)\n}", "func validate_Groups_ValidateWKT_1(ctx context.Context, r json.RawMessage) (err error) {\n\treturn nil\n}", "func mustUnmarshalJWK(s string) *jose.JSONWebKey {\n\tret := &jose.JSONWebKey{}\n\tif err := json.Unmarshal([]byte(s), ret); err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func (jwk *Jwk) validateRSAParams() error {\n\tif jwk.E < 1 {\n\t\treturn errors.New(\"RSA Required Param (E) is empty/default (<= 0)\")\n\t}\n\tif jwk.N == nil {\n\t\treturn errors.New(\"RSA Required Param (N) is nil\")\n\t}\n\n\tpOk := jwk.P != nil\n\tqOk := jwk.Q != nil\n\tdpOk := jwk.Dp != nil\n\tdqOk := jwk.Dq != nil\n\tqiOk := jwk.Qi != nil\n\tothOk := len(jwk.OtherPrimes) > 0\n\n\tparamsOR := pOk || qOk || dpOk || dqOk || qiOk\n\tparamsAnd := pOk && qOk && dpOk && dqOk && qiOk\n\n\tif jwk.D == nil {\n\t\tif (paramsOR || othOk) == true {\n\t\t\treturn errors.New(\"RSA first/second prime values are present but not Private key value (D)\")\n\t\t}\n\t} else {\n\t\tif paramsOR != paramsAnd {\n\t\t\treturn errors.New(\"Not all RSA first/second prime values are present or not present\")\n\t\t} else if !paramsOR && othOk {\n\t\t\treturn errors.New(\"RSA other primes is included but 1st, 2nd prime variables are missing\")\n\t\t} else if othOk {\n\t\t\tfor i, oth := range jwk.OtherPrimes {\n\t\t\t\tif oth.Coeff == nil {\n\t\t\t\t\treturn fmt.Errorf(\"Other Prime at index=%d, Coeff missing/nil\", i)\n\t\t\t\t} else if oth.R == nil {\n\t\t\t\t\treturn fmt.Errorf(\"Other Prime at index=%d, R missing/nil\", i)\n\t\t\t\t} else if oth.Exp == nil {\n\t\t\t\t\treturn fmt.Errorf(\"Other Prime at index=%d, Exp missing/nil\", i)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *JSONWebKey) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (wfe *WebFrontEndImpl) extractJWK(_ *http.Request, jws *jose.JSONWebSignature) (*jose.JSONWebKey, *acme.ProblemDetails) {\n\theader := jws.Signatures[0].Header\n\tkey := header.JSONWebKey\n\tif key == nil {\n\t\treturn nil, acme.MalformedProblem(\"No JWK in JWS header\")\n\t}\n\tif !key.Valid() {\n\t\treturn nil, acme.MalformedProblem(\"Invalid JWK in JWS header\")\n\t}\n\tif header.KeyID != \"\" {\n\t\treturn nil, acme.MalformedProblem(\"jwk and kid header fields are mutually exclusive.\")\n\t}\n\treturn key, nil\n}", "func VerifyWithJWK(buf []byte, key jwk.Key) (payload []byte, err error) {\n\n\tkeyVal, err := key.Materialize()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to materialize key: %w\", err)\n\t}\n\treturn Verify(buf, key.GetAlgorithm(), keyVal)\n}", "func (m *JwtComponent) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn JwtComponentValidationError{\n\t\t\t\tfield: \"PrivateKey\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\tif v, ok := interface{}(m.GetPublicKey()).(interface{ Validate() error }); ok {\n\t\tif err := v.Validate(); err != nil {\n\t\t\treturn JwtComponentValidationError{\n\t\t\t\tfield: \"PublicKey\",\n\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\tcause: err,\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func 
NewJWK(jwk map[string]interface{}) JWK {\n\treturn jwk\n}", "func validate_Groups_ValidateWKT_0(ctx context.Context, r json.RawMessage) (err error) {\n\treturn nil\n}", "func (o *GetSlashingParametersOKBodyResult) Validate(formats strfmt.Registry) error {\n\treturn nil\n}", "func (m *JGroup) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAllowedDomains(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCustomize(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDefaultChannels(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStackTemplates(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTitle(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (m *ThermalSimulationParameters) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAnisotropicStrainCoefficientsParallel(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAnisotropicStrainCoefficientsPerpendicular(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateAnisotropicStrainCoefficientsZ(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBeamDiameter(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateCoaxialAverageSensorZHeights(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHatchSpacing(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHeaterTemperature(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateIncludeStressAnalysis(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInstantDynamicSensorLayers(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInstantDynamicSensorRadius(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateInstantStaticSensorRadius(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLaserWattage(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLayerRotationAngle(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLayerThickness(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMeshResolutionFactor(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputCoaxialAverageSensorData(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputInstantDynamicSensor(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputInstantStaticSensor(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputPointProbe(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputPointThermalHistory(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputPrintRitePcsSensor(formats); err != nil {\n\t\t// prop\n\t\tres = 
append(res, err)\n\t}\n\n\tif err := m.validateOutputShrinkage(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutputStateMap(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validatePrintRitePcsSensorRadius(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateScanSpeed(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSelectedPoints(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSlicingStripeWidth(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStartingLayerAngle(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (keySetter *KeySetter) Validate() []string {\n\tvar errorData []string = []string{}\n\tif keySetter.Key == \"\" {\n\t\terrorData = append(errorData, \"field 'key' is required\")\n\t}\n\tif keySetter.Value == \"\" || keySetter.Value == nil {\n\t\terrorData = append(errorData, \"field 'value' is required\")\n\t}\n\tif keySetter.Expiry < 0 {\n\t\terrorData = append(errorData, \"Enter a valid numerical expiry in ms\")\n\t}\n\treturn errorData\n}", "func (k Key) Validate() error {\n\n\t// check method\n\tif err := k.hasValidMethod(); err != nil {\n\t\treturn err\n\t}\n\n\t//check label\n\tif err := k.hasValidLabel(); err != nil {\n\t\treturn err\n\t}\n\n\t// check secret\n\tif err := k.hasValidSecret32(); err != nil {\n\t\treturn err\n\t}\n\n\t// check algo\n\tif err := k.hasValidAlgo(); err != nil {\n\t\treturn err\n\t}\n\n\t// check digits\n\tif err := k.hasValidDigits(); err != nil {\n\t\treturn err\n\t}\n\n\t// check period\n\tif err := k.hasValidPeriod(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func GetJWTPayload(ctx context.Context) (ValidatedJWTPayload, bool) {\n\tvalue := ctx.Value(key)\n\n\tpayload, ok := value.(ValidatedJWTPayload)\n\tif ok && payload.Validated {\n\t\treturn payload, true\n\t}\n\n\treturn ValidatedJWTPayload{}, false\n}", "func (job *Job) Validate(workflow *Workflow) error {\n\tvar missing []string\n\tinputParameters := make(map[string]string)\n\tfor _, param := range job.InputParameters {\n\t\tinputParameters[param.Name] = param.Value\n\t}\n\tfor _, key := range workflow.InputParameters {\n\t\tif _, ok := inputParameters[key]; !ok {\n\t\t\tmissing = append(missing, key)\n\t\t}\n\t}\n\tif len(missing) > 0 {\n\t\treturn errors.Errorf(ErrMsgMissingParamF, missing)\n\t}\n\treturn nil\n}", "func (o *GetSlashingParametersOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateResult(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ValidateRSAParams checks the RSA parameters of an RSA-type JWK. If the JWK is invalid, an error is returned describing the values that cause the validation to fail.
func (jwk *Jwk) validateRSAParams() error { if jwk.E < 1 { return errors.New("RSA Required Param (E) is empty/default (<= 0)") } if jwk.N == nil { return errors.New("RSA Required Param (N) is nil") } pOk := jwk.P != nil qOk := jwk.Q != nil dpOk := jwk.Dp != nil dqOk := jwk.Dq != nil qiOk := jwk.Qi != nil othOk := len(jwk.OtherPrimes) > 0 paramsOR := pOk || qOk || dpOk || dqOk || qiOk paramsAnd := pOk && qOk && dpOk && dqOk && qiOk if jwk.D == nil { if paramsOR || othOk { return errors.New("RSA first/second prime values are present but not Private key value (D)") } } else { if paramsOR != paramsAnd { return errors.New("Not all RSA first/second prime values are present or not present") } else if !paramsOR && othOk { return errors.New("RSA other primes is included but 1st, 2nd prime variables are missing") } else if othOk { for i, oth := range jwk.OtherPrimes { if oth.Coeff == nil { return fmt.Errorf("Other Prime at index=%d, Coeff missing/nil", i) } else if oth.R == nil { return fmt.Errorf("Other Prime at index=%d, R missing/nil", i) } else if oth.Exp == nil { return fmt.Errorf("Other Prime at index=%d, Exp missing/nil", i) } } } } return nil }
[ "func (priv *PKCS11PrivateKeyRSA) Validate() error {\n\tpub := priv.key.PubKey.(*rsa.PublicKey)\n\tif pub.E < 2 {\n\t\treturn errMalformedRSAKey\n\t}\n\t// The software implementation actively rejects 'large' public\n\t// exponents, in order to simplify its own implementation.\n\t// Here, instead, we expect the PKCS#11 library to enforce its\n\t// own preferred constraints, whatever they might be.\n\treturn nil\n}", "func buildJWKFromRSA(k *rsa.PublicKey) (*Key, error) {\n\treturn &Key{\n\t\tKeyType: \"RSA\",\n\t\tN: base64.RawURLEncoding.EncodeToString(k.N.Bytes()),\n\t\tE: base64.RawURLEncoding.EncodeToString(big.NewInt(int64(k.E)).Bytes()),\n\t}, nil\n}", "func CheckRSAKeyPair(pubKeyData, privKeyData []byte) error {\n\tprivKey, err := keyutil.ParsePrivateKeyPEM(privKeyData)\n\tif err != nil {\n\t\treturn err\n\t}\n\trsaPrivateKey, ok := privKey.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn fmt.Errorf(\"private key is not of rsa type\")\n\t}\n\tpubKeys, err := keyutil.ParsePublicKeysPEM(pubKeyData)\n\tif err != nil {\n\t\treturn err\n\t}\n\twantRSAPublicKey, ok := pubKeys[0].(*rsa.PublicKey)\n\tif !ok {\n\t\treturn fmt.Errorf(\"public key is not of rsa type\")\n\t}\n\t// The private key embeds the public key and the embedded key must\n\t// match the provided public key.\n\tif !reflect.DeepEqual(rsaPrivateKey.PublicKey, *wantRSAPublicKey) {\n\t\treturn fmt.Errorf(\"key pair do not match\")\n\t}\n\treturn nil\n}", "func (jwk *RSAPrivateJWK) PrivateRSA() (*rsa.PrivateKey, error) {\n\tmodulusBytes, err := base64.RawURLEncoding.DecodeString(jwk.ModulusBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmodulus := new(big.Int)\n\tmodulus = modulus.SetBytes(modulusBytes)\n\tpublicExponentBytes, err := base64.RawURLEncoding.DecodeString(jwk.PublicExponentBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor len(publicExponentBytes) < 8 {\n\t\tpublicExponentBytes = append(publicExponentBytes, 0)\n\t}\n\tpublicExponent := int(binary.LittleEndian.Uint64(publicExponentBytes))\n\tprivateExponentBytes, err := base64.RawURLEncoding.DecodeString(jwk.PrivateExponentBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprivateExponent := new(big.Int)\n\tprivateExponent = privateExponent.SetBytes(privateExponentBytes)\n\tfirstPrimeFactorBytes, err := base64.RawURLEncoding.DecodeString(jwk.FirstPrimeFactorBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfirstPrimeFactor := new(big.Int)\n\tfirstPrimeFactor = firstPrimeFactor.SetBytes(firstPrimeFactorBytes)\n\tsecondPrimeFactorBytes, err := base64.RawURLEncoding.DecodeString(jwk.SecondPrimeFactorBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsecondPrimeFactor := new(big.Int)\n\tsecondPrimeFactor = secondPrimeFactor.SetBytes(secondPrimeFactorBytes)\n\tprivateExpModFirstPrimeMinusOneBytes, err := base64.RawURLEncoding.DecodeString(jwk.PrivateExpModFirstPrimeMinusOneBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprivateExpModFirstPrimeMinusOne := new(big.Int)\n\tprivateExpModFirstPrimeMinusOne = privateExpModFirstPrimeMinusOne.SetBytes(privateExpModFirstPrimeMinusOneBytes)\n\tprivateExpModSecondPrimeMinusOneBytes, err := base64.RawURLEncoding.DecodeString(jwk.PrivateExpModSecondPrimeMinusOneBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprivateExpModSecondPrimeMinusOne := new(big.Int)\n\tprivateExpModSecondPrimeMinusOne = privateExpModSecondPrimeMinusOne.SetBytes(privateExpModSecondPrimeMinusOneBytes)\n\tsecondPrimeInverseModFirstPrimeBytes, err := 
base64.RawURLEncoding.DecodeString(jwk.SecondPrimeInverseModFirstPrimeBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsecondPrimeInverseModFirstPrime := new(big.Int)\n\tsecondPrimeInverseModFirstPrime = secondPrimeInverseModFirstPrime.SetBytes(secondPrimeInverseModFirstPrimeBytes)\n\trsaPrivateKey := rsa.PrivateKey{\n\t\tPublicKey: rsa.PublicKey{\n\t\t\tN: modulus,\n\t\t\tE: publicExponent,\n\t\t},\n\t\tD: privateExponent,\n\t\tPrimes: []*big.Int{firstPrimeFactor, secondPrimeFactor},\n\t\tPrecomputed: rsa.PrecomputedValues{\n\t\t\tDp: privateExpModFirstPrimeMinusOne,\n\t\t\tDq: privateExpModSecondPrimeMinusOne,\n\t\t\tQinv: secondPrimeInverseModFirstPrime,\n\t\t},\n\t}\n\treturn &rsaPrivateKey, nil\n}", "func (jwk *RSAPublicJWK) PublicRSA() (*rsa.PublicKey, error) {\n\tmodulusBytes, err := base64.RawURLEncoding.DecodeString(jwk.ModulusBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmodulus := new(big.Int)\n\tmodulus = modulus.SetBytes(modulusBytes)\n\tpublicExponentBytes, err := base64.RawURLEncoding.DecodeString(jwk.PublicExponentBase64)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor len(publicExponentBytes) < 8 {\n\t\tpublicExponentBytes = append(publicExponentBytes, 0)\n\t}\n\tpublicExponent := int(binary.LittleEndian.Uint64(publicExponentBytes))\n\trsaPublicKey := rsa.PublicKey{\n\t\tN: modulus,\n\t\tE: publicExponent,\n\t}\n\treturn &rsaPublicKey, nil\n}", "func TestRsaJwk(t *testing.T) {\n\tfmt.Println(\"--> TestRsaJwk\")\n\tin := `{\n \"kty\": \"RSA\",\n \"n\": \"n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw\",\n \"e\": \"AQAB\"\n }`\n\tvar out JsonWebKey\n\terr := json.Unmarshal([]byte(in), &out)\n\tif err != nil {\n\t\tt.Errorf(\"JSON unmarshal error: %+v\", err)\n\t\treturn\n\t}\n\n\tif out.KeyType != \"RSA\" {\n\t\tt.Errorf(\"Incorrect key type %+v, expecting %+v\", out.KeyType, \"RSA\")\n\t\treturn\n\t}\n\n\tif out.Rsa == nil {\n\t\tt.Errorf(\"RSA key not present\")\n\t\treturn\n\t}\n\n\tif out.Rsa.E != 0x010001 {\n\t\tt.Errorf(\"Incorrect public exponent %+v, expecting %+v\", out.Rsa.E, 0x010001)\n\t\treturn\n\t}\n\n\tnBytes := []byte{\n\t\t0x9f, 0x81, 0x0f, 0xb4, 0x03, 0x82, 0x73, 0xd0, 0x25, 0x91, 0xe4, 0x07, 0x3f, 0x31, 0xd2, 0xb6,\n\t\t0x00, 0x1b, 0x82, 0xce, 0xdb, 0x4d, 0x92, 0xf0, 0x50, 0x16, 0x5d, 0x47, 0xcf, 0xca, 0xb8, 0xa3,\n\t\t0xc4, 0x1c, 0xb7, 0x78, 0xac, 0x75, 0x53, 0x79, 0x3f, 0x8e, 0xf9, 0x75, 0x76, 0x8d, 0x1a, 0x23,\n\t\t0x74, 0xd8, 0x71, 0x25, 0x64, 0xc3, 0xbc, 0xd7, 0x7b, 0x9e, 0xa4, 0x34, 0x54, 0x48, 0x99, 0x40,\n\t\t0x7c, 0xff, 0x00, 0x99, 0x92, 0x0a, 0x93, 0x1a, 0x24, 0xc4, 0x41, 0x48, 0x52, 0xab, 0x29, 0xbd,\n\t\t0xb0, 0xa9, 0x5c, 0x06, 0x53, 0xf3, 0x6c, 0x60, 0xe6, 0x0b, 0xf9, 0x0b, 0x62, 0x58, 0xdd, 0xa5,\n\t\t0x6f, 0x37, 0x04, 0x7b, 0xa5, 0xc2, 0xd1, 0xd0, 0x29, 0xaf, 0x9c, 0x9d, 0x40, 0xba, 0xc7, 0xaa,\n\t\t0x41, 0xc7, 0x8a, 0x0d, 0xd1, 0x06, 0x8a, 0xdd, 0x69, 0x9e, 0x80, 0x8f, 0xea, 0x01, 0x1e, 0xa1,\n\t\t0x44, 0x1d, 0x8a, 0x4f, 0x7b, 0xb4, 0xe9, 0x7b, 0xe3, 0x9f, 0x55, 0xf1, 0xdd, 0xd4, 0x4e, 0x9c,\n\t\t0x4b, 0xa3, 0x35, 0x15, 0x97, 0x03, 0xd4, 0xd3, 0x4b, 0x60, 0x3e, 0x65, 0x14, 0x7a, 0x4f, 0x23,\n\t\t0xd6, 0xd3, 0xc0, 0x99, 0x6c, 0x75, 0xed, 0xee, 0x84, 0x6a, 0x82, 0xd1, 0x90, 0xae, 0x10, 0x78,\n\t\t0x3c, 0x96, 0x1c, 0xf0, 0x38, 0x7a, 
0xed, 0x21, 0x06, 0xd2, 0xd0, 0x55, 0x5b, 0x6f, 0xd9, 0x37,\n\t\t0xfa, 0xd5, 0x53, 0x53, 0x87, 0xe0, 0xff, 0x72, 0xff, 0xbe, 0x78, 0x94, 0x14, 0x02, 0xb0, 0xb8,\n\t\t0x22, 0xea, 0x2a, 0x74, 0xb6, 0x05, 0x8c, 0x1d, 0xab, 0xf9, 0xb3, 0x4a, 0x76, 0xcb, 0x63, 0xb8,\n\t\t0x7f, 0xaa, 0x2c, 0x68, 0x47, 0xb8, 0xe2, 0x83, 0x7f, 0xff, 0x91, 0x18, 0x6e, 0x6b, 0x1c, 0x14,\n\t\t0x91, 0x1c, 0xf9, 0x89, 0xa8, 0x90, 0x92, 0xa8, 0x1c, 0xe6, 0x01, 0xdd, 0xac, 0xd3, 0xf9, 0xcf}\n\tn := big.NewInt(0)\n\tn.SetBytes(nBytes)\n\tif out.Rsa.N.Cmp(n) != 0 {\n\t\tt.Errorf(\"Incorrect modulus %+v, expecting %+v\", out.Rsa.N, n)\n\t\treturn\n\t}\n}", "func parseRSA(in []byte) (*rsa.PublicKey, error) {\n\tvar w struct {\n\t\tE *big.Int\n\t\tN *big.Int\n\t\tRest []byte `ssh:\"rest\"`\n\t}\n\tif err := ssh.Unmarshal(in, &w); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error unmarshaling public key\")\n\t}\n\tif w.E.BitLen() > 24 {\n\t\treturn nil, errors.New(\"invalid public key: exponent too large\")\n\t}\n\te := w.E.Int64()\n\tif e < 3 || e&1 == 0 {\n\t\treturn nil, errors.New(\"invalid public key: incorrect exponent\")\n\t}\n\n\tvar key rsa.PublicKey\n\tkey.E = int(e)\n\tkey.N = w.N\n\treturn &key, nil\n}", "func prepareRSAKeys(privRSAPath, pubRSAPath string)(*rsa.PublicKey, *rsa.PrivateKey, error){\n pwd, _ := os.Getwd()\n\n verifyBytes, err := ioutil.ReadFile(pwd+pubRSAPath)\n if err != nil{\n return &rsa.PublicKey{}, &rsa.PrivateKey{}, GojwtErrInvalidEmptyPublicKey\n }\n\n verifiedKey, err := jwt.ParseRSAPublicKeyFromPEM(verifyBytes)\n if err != nil{\n return &rsa.PublicKey{}, &rsa.PrivateKey{}, GojwtErrIsNotPubRSAKey\n }\n\n signBytes, err := ioutil.ReadFile(pwd+privRSAPath)\n if err != nil{\n return &rsa.PublicKey{}, &rsa.PrivateKey{}, GojwtErrInvalidEmptyPrivateKey\n }\n\n signedKey, err := jwt.ParseRSAPrivateKeyFromPEM(signBytes)\n if err != nil{\n return &rsa.PublicKey{}, &rsa.PrivateKey{}, GojwtErrIsNotPrivRSAKey\n }\n \n return verifiedKey, signedKey, nil\n}", "func (jwk *Jwk) Validate() error {\n\n\t// If the alg parameter is set, make sure it matches the set JWK Type\n\tif len(jwk.Algorithm) > 0 {\n\t\talgKeyType := GetKeyType(jwk.Algorithm)\n\t\tif algKeyType != jwk.Type {\n\t\t\tfmt.Errorf(\"Jwk Type (kty=%v) doesn't match the algorithm key type (%v)\", jwk.Type, algKeyType)\n\t\t}\n\t}\n\tswitch jwk.Type {\n\tcase KeyTypeRSA:\n\t\tif err := jwk.validateRSAParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase KeyTypeEC:\n\t\tif err := jwk.validateECParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase KeyTypeOct:\n\t\tif err := jwk.validateOctParams(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn errors.New(\"KeyType (kty) must be EC, RSA or Oct\")\n\t}\n\n\treturn nil\n}", "func ValidateParams(k, m uint8) (*Params, error) {\n\tif k < 1 {\n\t\treturn nil, errors.New(\"k cannot be zero\")\n\t}\n\n\tif m < 1 {\n\t\treturn nil, errors.New(\"m cannot be zero\")\n\t}\n\n\tif k+m > 255 {\n\t\treturn nil, errors.New(\"(k + m) cannot be bigger than Galois field GF(2^8) - 1\")\n\t}\n\n\treturn &Params{\n\t\tK: k,\n\t\tM: m,\n\t}, nil\n}", "func parseRSAKey(key ssh.PublicKey) (*rsa.PublicKey, error) {\n\tvar sshWire struct {\n\t\tName string\n\t\tE *big.Int\n\t\tN *big.Int\n\t}\n\tif err := ssh.Unmarshal(key.Marshal(), &sshWire); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal key %v: %v\", key.Type(), err)\n\t}\n\treturn &rsa.PublicKey{N: sshWire.N, E: int(sshWire.E.Int64())}, nil\n}", "func GenRSAKey(len int, password string, kmPubFile, kmPrivFile, bpmPubFile, 
bpmPrivFile *os.File) error {\n\tif len == rsaLen2048 || len == rsaLen3072 {\n\t\tkey, err := rsa.GenerateKey(rand.Reader, len)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := writePrivKeyToFile(key, kmPrivFile, password); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writePubKeyToFile(key.Public(), kmPubFile); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tkey, err = rsa.GenerateKey(rand.Reader, len)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := writePrivKeyToFile(key, bpmPrivFile, password); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := writePubKeyToFile(key.Public(), bpmPubFile); err != nil {\n\t\t\treturn err\n\n\t\t}\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"RSA key length must be 2048 or 3084 Bits, but length is: %d\", len)\n}", "func (c *PublicParametersManager) Validate() error {\n\treturn c.ppm.Validate()\n}", "func (j *JWKS) generateRSAKey() (crypto.PrivateKey, error) {\n\tif j.bits == 0 {\n\t\tj.bits = 2048\n\t}\n\tif j.bits < 2048 {\n\t\treturn nil, errors.Errorf(`jwks: key size must be at least 2048 bit for algorithm`)\n\t}\n\tkey, err := rsa.GenerateKey(rand.Reader, j.bits)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"jwks: unable to generate RSA key\")\n\t}\n\n\treturn key, nil\n}", "func PrivateKeyValidate(priv *rsa.PrivateKey,) error", "func NewRSA(encoding encodingType) (*RSA, error) {\n\tif encoding == \"\" {\n\t\tencoding = Base64\n\t}\n\treturn &RSA{Encoding: encoding}, nil\n}", "func (me *XsdGoPkgHasElem_RSAKeyValue) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_RSAKeyValue; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.RSAKeyValue.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func validateRSAPubKey(key in_toto.Key) error {\n\tif key.KeyType != \"rsa\" {\n\t\treturn fmt.Errorf(\"invalid KeyType for key '%s': should be 'rsa', got\"+\n\t\t\t\" '%s'\", key.KeyId, key.KeyType)\n\t}\n\tif key.Scheme != \"rsassa-pss-sha256\" {\n\t\treturn fmt.Errorf(\"invalid scheme for key '%s': should be \"+\n\t\t\t\"'rsassa-pss-sha256', got: '%s'\", key.KeyId, key.Scheme)\n\t}\n\tif err := validatePubKey(key); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (v *PublicParamsManager) Validate() error {\n\tpp := v.PublicParams()\n\tif pp == nil {\n\t\treturn errors.New(\"public parameters not set\")\n\t}\n\treturn pp.Validate()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewIndexDB creates a new instance of IndexDB
func NewIndexDB(db store.DB, recordStore *RecordDB) *IndexDB { return &IndexDB{db: db, recordStore: recordStore} }
[ "func New(ng engine.Engine) (*DB, error) {\n\tdb := DB{\n\t\tng: ng,\n\t}\n\n\terr := db.Update(func(tx *Tx) error {\n\t\t_, err := tx.GetTable(indexTable)\n\t\tif err == ErrTableNotFound {\n\t\t\t_, err = tx.CreateTable(indexTable)\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &db, nil\n}", "func NewDBIndex(stream *health.Stream, db *dbr.Connection, networkID uint32, chainID string, codec codec.Codec) *DB {\n\treturn &DB{\n\t\tnetworkID: networkID,\n\t\tchainID: chainID,\n\t\tcodec: codec,\n\t\tstream: stream,\n\t\tdb: db,\n\t}\n}", "func NewDbIndex(pg *pg.DB) *DbIndex {\n\tpg.AddQueryHook(dbLogger{})\n\ti := &DbIndex{\n\t\tpg: pg,\n\t\ttokensCache: map[string]int{},\n\t\ttokensM: sync.RWMutex{},\n\t\tdocumentsCache: map[string]int{},\n\t\tdocumentsM: sync.RWMutex{},\n\t\tinsertC: make(chan Occurrence),\n\t}\n\tgo i.flush()\n\treturn i\n}", "func NewIndex(addr, name, typ string, md *index.Metadata) (*Index, error) {\n\n\tfmt.Println(\"Get a new index: \", addr, name)\n client := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\t//MaxIdleConnsPerHost: 200,\n\t\t\tMaxIdleConnsPerHost: 2000000,\n\t\t},\n\t\tTimeout: 2500000 * time.Millisecond,\n\t}\n\tconn, err := elastic.NewClient(elastic.SetURL(addr), elastic.SetHttpClient(client))\n\tif err != nil {\n fmt.Println(\"Get error here\");\n\t\treturn nil, err\n\t}\n\tret := &Index{\n\t\tconn: conn,\n\t\tmd: md,\n\t\tname: name,\n\t\ttyp: typ,\n\t}\n fmt.Println(\"get here ======\");\n\n\treturn ret, nil\n\n}", "func NewTestDB() *TestDB {\n // Retrieve a temporary path.\n f, err := ioutil.TempFile(\"\", \"\")\n if err != nil {\n panic(\"temp file: \" + err.Error())\n }\n path := f.Name()\n f.Close()\n os.Remove(path)\n // Open the database.\n db, err := Open(path, []byte(\"URL_Index\"), 0600)\n if err != nil {\n\t\tpanic(\"open: \" + err.Error())\n }\n // Return wrapped type.\n return &TestDB{db}\n}", "func newDocumentIndex(opts *iface.CreateDocumentDBOptions) iface.StoreIndex {\n\treturn &documentIndex{\n\t\tindex: map[string][]byte{},\n\t\topts: opts,\n\t}\n}", "func New() *Index {\n\treturn &Index{Version: Version}\n}", "func NewIndexedDB(conn *rpcc.Conn) *IndexedDB {\n\treturn &IndexedDB{conn: conn}\n}", "func openNewIndex() bleve.Index {\n\tfmt.Println(\"openNewIndex 0\")\n\n\tif mainIndex != nil {\n\t\tfmt.Println(\"openNewIndex 1a\")\n\t\treturn mainIndex\n\t}\n\n\t// Name(path) of the index\n\tindexPath := \"goProjectDemo.bleve\"\n\tmapping := bleve.NewIndexMapping()\n\tindex, err := bleve.New(indexPath, mapping)\n\n\tif err != nil {\n\t\tindex, err = bleve.Open(indexPath)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Error/opening index/bleve\", err)\n\t\t}\n\t}\n\n\tmainIndex = index\n\tfmt.Println(\"openNewIndex 1b\")\n\treturn index\n}", "func (s *shard) initIndexDatabase() error {\n\tvar err error\n\tstoreOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir))\n\ts.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.forwardFamily, err = s.indexStore.CreateFamily(\n\t\tforwardIndexDir,\n\t\tkv.FamilyOption{\n\t\t\tCompactThreshold: 0,\n\t\t\tMerger: string(tagindex.SeriesForwardMerger)})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.invertedFamily, err = s.indexStore.CreateFamily(\n\t\tinvertedIndexDir,\n\t\tkv.FamilyOption{\n\t\t\tCompactThreshold: 0,\n\t\t\tMerger: string(tagindex.SeriesInvertedMerger)})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.indexDB, err = 
newIndexDBFunc(\n\t\tcontext.TODO(),\n\t\tfilepath.Join(s.path, metaDir),\n\t\ts.metadata, s.forwardFamily,\n\t\ts.invertedFamily)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *shard) initIndexDatabase() error {\n\tvar err error\n\tstoreOption := kv.DefaultStoreOption(filepath.Join(s.path, indexParentDir))\n\ts.indexStore, err = newKVStoreFunc(storeOption.Path, storeOption)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.forwardFamily, err = s.indexStore.CreateFamily(\n\t\tforwardIndexDir,\n\t\tkv.FamilyOption{\n\t\t\tCompactThreshold: 0,\n\t\t\tMerger: string(invertedindex.SeriesForwardMerger)})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.invertedFamily, err = s.indexStore.CreateFamily(\n\t\tinvertedIndexDir,\n\t\tkv.FamilyOption{\n\t\t\tCompactThreshold: 0,\n\t\t\tMerger: string(invertedindex.SeriesInvertedMerger)})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.indexDB, err = newIndexDBFunc(\n\t\tcontext.TODO(),\n\t\tfilepath.Join(s.path, metaDir),\n\t\ts.metadata, s.forwardFamily,\n\t\ts.invertedFamily)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func New(path string) (ip *Indexio, err error) {\n\tvar i Indexio\n\t// Initialize functions map for marshaling and unmarshaling\n\tfm := turtleDB.NewFuncsMap(marshal, unmarshal)\n\t// Create new instance of turtleDB\n\tif i.db, err = turtleDB.New(\"indexio\", path, fm); err != nil {\n\t\treturn\n\t}\n\t// Initialize indexes bucket\n\tif err = i.db.Update(initBucket); err != nil {\n\t\treturn\n\t}\n\t// Assign ip as a pointer to i\n\tip = &i\n\treturn\n}", "func New(ds datastore.TxnDatastore, api *apistruct.FullNodeStruct) (*Index, error) {\n\tcs := chainsync.New(api)\n\tstore, err := chainstore.New(txndstr.Wrap(ds, \"chainstore\"), cs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinitMetrics()\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := &Index{\n\t\tapi: api,\n\t\tstore: store,\n\t\tsignaler: signaler.New(),\n\t\tindex: IndexSnapshot{\n\t\t\tMiners: make(map[string]Slashes),\n\t\t},\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tfinished: make(chan struct{}),\n\t}\n\tif err := s.loadFromDS(); err != nil {\n\t\treturn nil, err\n\t}\n\tgo s.start()\n\treturn s, nil\n}", "func newQueueIndex(dataDir string) (*queueIndex, error) {\n\tindexFile := path.Join(dataDir, cIndexFileName)\n\tindexArena, err := newArena(indexFile, cIndexFileSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &queueIndex{\n\t\tindexFile: indexFile,\n\t\tindexArena: indexArena,\n\t}, nil\n}", "func NewIndex(kind IndexKind, table string) Index {\n\treturn &index{\n\t\tkind: kind,\n\t\ttable: table,\n\t}\n}", "func NewIndex() *Index {\n\treturn &Index{root: &node{}}\n}", "func NewStorage(host, user, password, dbname, sslmode string) (sorted.KeyValue, error) {\n\tconninfo := fmt.Sprintf(\"user=%s dbname=%s host=%s password=%s sslmode=%s\", user, dbname, host, password, sslmode)\n\tdb, err := sql.Open(\"postgres\", conninfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &myIndexStorage{\n\t\tdb: db,\n\t\tStorage: &sqlindex.Storage{\n\t\t\tDB: db,\n\t\t\tSetFunc: altSet,\n\t\t\tBatchSetFunc: altBatchSet,\n\t\t\tPlaceHolderFunc: replacePlaceHolders,\n\t\t},\n\t\thost: host,\n\t\tuser: user,\n\t\tpassword: password,\n\t\tdatabase: dbname,\n\t}, nil\n}", "func New(ctx context.Context, ng engine.Engine) (*DB, error) {\n\treturn newDatabase(ctx, ng, database.Options{Codec: msgpack.NewCodec()})\n}", "func New(project *Project, metainfo *metainfo.Client, streams streams.Store, segments segments.Store, encStore 
*encryption.Store) *DB {\n\treturn &DB{\n\t\tproject: project,\n\t\tmetainfo: metainfo,\n\t\tstreams: streams,\n\t\tsegments: segments,\n\t\tencStore: encStore,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
UpdateLastKnownPulse must be called after updating TopSyncPulse
func (i *IndexDB) UpdateLastKnownPulse(ctx context.Context, topSyncPulse insolar.PulseNumber) error { i.lock.Lock() defer i.lock.Unlock() indexes, err := i.ForPulse(ctx, topSyncPulse) if err != nil && err != ErrIndexNotFound { return errors.Wrapf(err, "failed to get indexes for pulse: %d", topSyncPulse) } for idx := range indexes { inslogger.FromContext(ctx).Debugf("UpdateLastKnownPulse. pulse: %d, object: %s", topSyncPulse, indexes[idx].ObjID.DebugString()) if err := i.setLastKnownPN(topSyncPulse, indexes[idx].ObjID); err != nil { return errors.Wrapf(err, "can't setLastKnownPN. objId: %s. pulse: %d", indexes[idx].ObjID.DebugString(), topSyncPulse) } } return nil }
[ "func (t *tick) updateLast() {\n\tif t.passed() {\n\t\tt.last = t.now\n\t}\n}", "func (ps *RandomPeerSelector) UpdateLast(peer uint32) {\n\tps.last = peer\n}", "func (trd *trxDispatcher) updateLastSeenBlock() {\n\t// get the current value\n\tlsb := trd.blkObserver.Load()\n\tlog.Noticef(\"last seen block is #%d\", lsb)\n\n\t// make the change in the database so the progress persists\n\terr := repo.UpdateLastKnownBlock((*hexutil.Uint64)(&lsb))\n\tif err != nil {\n\t\tlog.Errorf(\"could not update last seen block; %s\", err.Error())\n\t}\n}", "func (l *Latency) UpdateLast(m Metadata) { l.update(m, true) }", "func (worker *VbucketWorker) SyncPulse() error {\n\tcmd := []interface{}{vwCmdSyncPulse}\n\treturn c.FailsafeOpAsync(worker.datach, cmd, worker.runFinCh)\n}", "func (ppe *PlatformPulseExtractor) fetchCurrentPulse(ctx context.Context) (uint32, error) {\n\tclient := ppe.client\n\trequest := &exporter.GetTopSyncPulse{}\n\tlog := belogger.FromContext(ctx)\n\tlog.Debug(\"Fetching top sync pulse\")\n\n\ttsp, err := client.TopSyncPulse(ctx, request)\n\tif err != nil {\n\t\tlog.WithField(\"request\", request).Error(errors.Wrap(err, \"failed to get TopSyncPulse\").Error())\n\t\treturn 0, err\n\t}\n\n\tlog.Debug(\"Received top sync pulse \", tsp.PulseNumber)\n\treturn tsp.PulseNumber, nil\n}", "func (m *PulseManager) Set(ctx context.Context, newPulse insolar.Pulse) error {\n\tm.setLock.Lock()\n\tdefer m.setLock.Unlock()\n\tif m.stopped {\n\t\treturn errors.New(\"can't call Set method on PulseManager after stop\")\n\t}\n\n\tctx, logger := inslogger.WithField(ctx, \"new_pulse\", newPulse.PulseNumber.String())\n\tlogger.Debug(\"received pulse\")\n\n\tctx, span := instracer.StartSpan(\n\t\tctx, \"PulseManager.Set\", trace.WithSampler(trace.AlwaysSample()),\n\t)\n\tspan.AddAttributes(\n\t\ttrace.Int64Attribute(\"pulse.PulseNumber\", int64(newPulse.PulseNumber)),\n\t)\n\tdefer span.End()\n\n\t// Dealing with node lists.\n\tlogger.Debug(\"dealing with node lists.\")\n\t{\n\t\tfromNetwork := m.NodeNet.GetAccessor(newPulse.PulseNumber).GetWorkingNodes()\n\t\tif len(fromNetwork) == 0 {\n\t\t\tlogger.Errorf(\"received zero nodes for pulse %d\", newPulse.PulseNumber)\n\t\t\treturn nil\n\t\t}\n\t\ttoSet := make([]insolar.Node, 0, len(fromNetwork))\n\t\tfor _, n := range fromNetwork {\n\t\t\ttoSet = append(toSet, insolar.Node{ID: n.ID(), Role: n.Role()})\n\t\t}\n\t\terr := m.NodeSetter.Set(newPulse.PulseNumber, toSet)\n\t\tif err != nil {\n\t\t\tpanic(errors.Wrap(err, \"call of SetActiveNodes failed\"))\n\t\t}\n\t}\n\n\tstoragePulse, err := m.PulseAccessor.Latest(ctx)\n\tif err == pulse.ErrNotFound {\n\t\tstoragePulse = *insolar.GenesisPulse\n\t} else if err != nil {\n\t\treturn errors.Wrap(err, \"call of GetLatestPulseNumber failed\")\n\t}\n\n\tfor _, d := range m.dispatchers {\n\t\td.ClosePulse(ctx, storagePulse)\n\t}\n\n\terr = m.JetModifier.Clone(ctx, storagePulse.PulseNumber, newPulse.PulseNumber, false)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to clone jet.Tree fromPulse=%v toPulse=%v\", storagePulse.PulseNumber, newPulse.PulseNumber)\n\t}\n\n\tif err := m.PulseAppender.Append(ctx, newPulse); err != nil {\n\t\treturn errors.Wrap(err, \"call of AddPulse failed\")\n\t}\n\n\terr = m.LogicRunner.OnPulse(ctx, storagePulse, newPulse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, d := range m.dispatchers {\n\t\td.BeginPulse(ctx, newPulse)\n\t}\n\n\treturn nil\n}", "func updateLastAppended(s *followerReplication, req *pb.AppendEntriesRequest) {\n\t// Mark any inflight logs as 
committed\n\tif logs := req.Entries; len(logs) > 0 {\n\t\tlast := logs[len(logs)-1]\n\t\tatomic.StoreUint64(&s.nextIndex, last.Index+1)\n\t\ts.commitment.match(s.peer.ID, last.Index)\n\t}\n\n\t// Notify still leader\n\ts.notifyAll(true)\n}", "func (p *Peer) UpdateLastAnnouncedBlock(blkHash *chainhash.Hash) {\n\tlog.Tracef(\"Updating last blk for peer %v, %v\", p.addr, blkHash)\n\n\tp.statsMtx.Lock()\n\tp.lastAnnouncedBlock = blkHash\n\tp.statsMtx.Unlock()\n}", "func (mmOnPulseFromConsensus *GatewayMock) OnPulseFromConsensusAfterCounter() uint64 {\n\treturn mm_atomic.LoadUint64(&mmOnPulseFromConsensus.afterOnPulseFromConsensusCounter)\n}", "func updateKnownBeats(in chan heartbeat.Beat, statusReport chan error) {\n\ntimer:\n\tfor {\n\t\tvar interval time.Duration\n\t\tif heartbeat.GetCoordinator() || heartbeat.GetFeasibleCoordinator() {\n\t\t\tinterval = 5 * time.Second\n\t\t} else {\n\t\t\tinterval = 30 * time.Second\n\t\t}\n\t\tcoordTimer := time.After(interval)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-coordTimer:\n\t\t\t\tlog.Debug(\"interval expired\")\n\t\t\t\t// when interval expires, delete beats of nodes that haven't been seen for 21 seconds and evaluate\n\t\t\t\tlog.WithField(\"nodes\", knownBeats.GetNodes()).Debug(\"Aging out nodes\")\n\t\t\t\tknownBeats.AgeOut()\n\t\t\t\t// update the beatmap with our own heartbeat\n\t\t\t\tknownBeats[node.Self.ID] = heartbeat.NewBeat()\n\t\t\t\tlog.WithField(\"nodes\", knownBeats.GetNodes()).Debug(\"Evaluating nodes\")\n\t\t\t\tc, f := knownBeats.ToBeats().Evaluate(heartbeat.GetCoordinator(), heartbeat.GetFeasibleCoordinator(), node.Self.ID)\n\t\t\t\theartbeat.SetCoordinator(c)\n\t\t\t\theartbeat.SetFeasibleCoordinator(f)\n\t\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\t\"coordinators\": knownBeats.ToBeats().CoordCount(),\n\t\t\t\t\t\"feasibleCoordinators\": knownBeats.ToBeats().FeasCount(),\n\t\t\t\t\t\"is_coordinator\": heartbeat.GetCoordinator(),\n\t\t\t\t\t\"is_feasible\": heartbeat.GetFeasibleCoordinator(),\n\t\t\t\t}).Info(\"Finished evaluating feasible/coordinators\")\n\t\t\t\tstatusReport <- heartbeat.RoutineNormal{Timestamp: time.Now()}\n\t\t\t\tcontinue timer\n\t\t\tcase b := <-in:\n\t\t\t\tlog.Debug(\"beat in\")\n\t\t\t\tknownBeats[b.ID] = b\n\t\t\t}\n\t\t}\n\t}\n}", "func (e *Endpoint) UpdateLastConnection() {\n\te.LastConnection = time.Now().UTC()\n}", "func (p *Peer) runUpdateSyncing() {\n\ttimer := time.NewTimer(p.streamer.syncUpdateDelay)\n\tdefer timer.Stop()\n\n\tselect {\n\tcase <-timer.C:\n\tcase <-p.streamer.quit:\n\t\treturn\n\t}\n\n\tkad := p.streamer.delivery.kad\n\tpo := chunk.Proximity(p.BzzAddr.Over(), kad.BaseAddr())\n\n\tdepth := kad.NeighbourhoodDepth()\n\n\tlog.Debug(\"update syncing subscriptions: initial\", \"peer\", p.ID(), \"po\", po, \"depth\", depth)\n\n\t// initial subscriptions\n\tp.updateSyncSubscriptions(syncSubscriptionsDiff(po, -1, depth, kad.MaxProxDisplay))\n\n\tdepthChangeSignal, unsubscribeDepthChangeSignal := kad.SubscribeToNeighbourhoodDepthChange()\n\tdefer unsubscribeDepthChangeSignal()\n\n\tprevDepth := depth\n\tfor {\n\t\tselect {\n\t\tcase _, ok := <-depthChangeSignal:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// update subscriptions for this peer when depth changes\n\t\t\tdepth := kad.NeighbourhoodDepth()\n\t\t\tlog.Debug(\"update syncing subscriptions\", \"peer\", p.ID(), \"po\", po, \"depth\", depth)\n\t\t\tp.updateSyncSubscriptions(syncSubscriptionsDiff(po, prevDepth, depth, kad.MaxProxDisplay))\n\t\t\tprevDepth = depth\n\t\tcase 
<-p.streamer.quit:\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Debug(\"update syncing subscriptions: exiting\", \"peer\", p.ID())\n}", "func (f *Input) syncLastPollFiles(ctx context.Context) {\n\tvar buf bytes.Buffer\n\tenc := json.NewEncoder(&buf)\n\n\t// Encode the number of known files\n\tif err := enc.Encode(len(f.knownFiles)); err != nil {\n\t\tf.Errorw(\"Failed to encode known files\", zap.Error(err))\n\t\treturn\n\t}\n\n\t// Encode each known file\n\tfor _, fileReader := range f.knownFiles {\n\t\tif err := enc.Encode(fileReader); err != nil {\n\t\t\tf.Errorw(\"Failed to encode known files\", zap.Error(err))\n\t\t}\n\t}\n\n\tif err := f.persister.Set(ctx, knownFilesKey, buf.Bytes()); err != nil {\n\t\tf.Errorw(\"Failed to sync to database\", zap.Error(err))\n\t}\n}", "func (s *Storage) GetPulseByPrev(prevPulse models.Pulse) (models.Pulse, error) {\n\ttimer := prometheus.NewTimer(GetPulseByPrevDuration)\n\tdefer timer.ObserveDuration()\n\n\tvar pulse models.Pulse\n\terr := s.db.Where(\"prev_pulse_number = ?\", prevPulse.PulseNumber).First(&pulse).Error\n\treturn pulse, err\n}", "func (mmOnPulseFromPulsar *GatewayMock) OnPulseFromPulsarAfterCounter() uint64 {\n\treturn mm_atomic.LoadUint64(&mmOnPulseFromPulsar.afterOnPulseFromPulsarCounter)\n}", "func (_Stakingbindings *StakingbindingsCaller) LastUpdateTime(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Stakingbindings.contract.Call(opts, &out, \"lastUpdateTime\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (buf *queueBuffer) updateLast(newNode *messageNode) {\n\tif buf.last != nil {\n\t\tbuf.last.next = newNode\n\t}\n\tbuf.last = newNode\n}", "func (huo *HistorytakingUpdateOne) AddPulse(i int) *HistorytakingUpdateOne {\n\thuo.mutation.AddPulse(i)\n\treturn huo\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TruncateHead removes all records after lastPulse.
func (i *IndexDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error { i.lock.Lock() defer i.lock.Unlock() it := i.db.NewIterator(&indexKey{objID: *insolar.NewID(pulse.MinTimePulse, nil), pn: from}, false) defer it.Close() var hasKeys bool for it.Next() { hasKeys = true key := newIndexKey(it.Key()) err := i.db.Delete(&key) if err != nil { return errors.Wrapf(err, "can't delete key: %+v", key) } inslogger.FromContext(ctx).Debugf("Erased key. Pulse number: %s. ObjectID: %s", key.pn.String(), key.objID.String()) } if !hasKeys { inslogger.FromContext(ctx).Infof("No records. Nothing done. Pulse number: %s", from.String()) } return nil }
[ "func (r *RecordDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error {\n\n\tif err := r.truncateRecordsHead(ctx, from); err != nil {\n\t\treturn errors.Wrap(err, \"failed to truncate records head\")\n\t}\n\n\tif err := r.truncatePositionRecordHead(ctx, recordPositionKey{pn: from}, recordPositionKeyPrefix); err != nil {\n\t\treturn errors.Wrap(err, \"failed to truncate record positions head\")\n\t}\n\n\tif err := r.truncatePositionRecordHead(ctx, lastKnownRecordPositionKey{pn: from}, lastKnownRecordPositionKeyPrefix); err != nil {\n\t\treturn errors.Wrap(err, \"failed to truncate last known record positions head\")\n\t}\n\n\treturn nil\n}", "func (m *MemoryStorage) truncate(count int) {\n\tif len(m.entries) <= count {\n\t\treturn\n\t}\n\n\ttruncatePulses := m.entries[:len(m.entries)-count]\n\tm.entries = m.entries[len(truncatePulses):]\n\tfor _, p := range truncatePulses {\n\t\tdelete(m.snapshotEntries, p.PulseNumber)\n\t}\n}", "func (m *dropTruncaterMock) TruncateHead(p context.Context, p1 insolar.PulseNumber) (r error) {\n\tcounter := atomic.AddUint64(&m.TruncateHeadPreCounter, 1)\n\tdefer atomic.AddUint64(&m.TruncateHeadCounter, 1)\n\n\tif len(m.TruncateHeadMock.expectationSeries) > 0 {\n\t\tif counter > uint64(len(m.TruncateHeadMock.expectationSeries)) {\n\t\t\tm.t.Fatalf(\"Unexpected call to dropTruncaterMock.TruncateHead. %v %v\", p, p1)\n\t\t\treturn\n\t\t}\n\n\t\tinput := m.TruncateHeadMock.expectationSeries[counter-1].input\n\t\ttestify_assert.Equal(m.t, *input, dropTruncaterMockTruncateHeadInput{p, p1}, \"headTruncater.TruncateHead got unexpected parameters\")\n\n\t\tresult := m.TruncateHeadMock.expectationSeries[counter-1].result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the dropTruncaterMock.TruncateHead\")\n\t\t\treturn\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.TruncateHeadMock.mainExpectation != nil {\n\n\t\tinput := m.TruncateHeadMock.mainExpectation.input\n\t\tif input != nil {\n\t\t\ttestify_assert.Equal(m.t, *input, dropTruncaterMockTruncateHeadInput{p, p1}, \"headTruncater.TruncateHead got unexpected parameters\")\n\t\t}\n\n\t\tresult := m.TruncateHeadMock.mainExpectation.result\n\t\tif result == nil {\n\t\t\tm.t.Fatal(\"No results are set for the dropTruncaterMock.TruncateHead\")\n\t\t}\n\n\t\tr = result.r\n\n\t\treturn\n\t}\n\n\tif m.TruncateHeadFunc == nil {\n\t\tm.t.Fatalf(\"Unexpected call to dropTruncaterMock.TruncateHead. 
%v %v\", p, p1)\n\t\treturn\n\t}\n\n\treturn m.TruncateHeadFunc(p, p1)\n}", "func (l *List) Truncate() {\n\tl.nodes = l.nodes[:1]\n\t// point sentinel.prev at itself\n\ts := l.getNode(sentinelId)\n\ts.next = tower{}\n\ts.prev = sentinelId\n\tl.updateNode(s)\n\tl.checkpoint = nodeId(1)\n\tl.count = 0\n}", "func (l *Log) Truncate(lowest uint64) error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tvar segments []*segment\n\tfor _, s := range l.segments {\n\t\tif s.nextOffset <= lowest+1 {\n\t\t\tif err := s.Remove(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tsegments = append(segments, s)\n\t}\n\tl.segments = segments\n\treturn nil\n}", "func (l *Log) Truncate(lowest uint64) error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tvar segments []*segment\n\tfor _, seg := range l.segments {\n\t\tif latestOffset := seg.nextOffset - 1; latestOffset <= lowest {\n\t\t\tif err := seg.Remove(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tsegments = append(segments, seg)\n\t}\n\n\tl.segments = segments\n\n\treturn nil\n}", "func (wal *WAL) TruncateBefore(o Offset) error {\n\tcutoff := sequenceToFilename(o.FileSequence())\n\t_, latestOffset, err := wal.Latest()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to determine latest offset: %v\", err)\n\t}\n\tlatestSequence := latestOffset.FileSequence()\n\treturn wal.forEachSegment(func(file os.FileInfo, first bool, last bool) (bool, error) {\n\t\tif last || file.Name() >= cutoff {\n\t\t\t// Files are sorted by name, if we've gotten past the cutoff or\n\t\t\t// encountered the last (active) file, don't bother continuing.\n\t\t\treturn false, nil\n\t\t}\n\t\tif filenameToSequence(file.Name()) == latestSequence {\n\t\t\t// Don't delete the file containing the latest valid entry\n\t\t\treturn true, nil\n\t\t}\n\t\trmErr := os.Remove(filepath.Join(wal.dir, file.Name()))\n\t\tif rmErr != nil {\n\t\t\treturn false, rmErr\n\t\t}\n\t\twal.log.Debugf(\"Removed WAL file %v\", filepath.Join(wal.dir, file.Name()))\n\t\treturn true, nil\n\t})\n}", "func (b *ChangeBuffer) truncateAfter(e list.Element) {\n\t// We must iterate and remove elements one by one to avoid memory leaks\n\tfirst := e.Next()\n\tfor first != nil {\n\t\tif first.Next() != nil {\n\t\t\tfirst = first.Next()\n\t\t\tb.Remove(first.Prev())\n\t\t} else {\n\t\t\tb.Remove(first)\n\t\t\tfirst = nil\n\t\t}\n\t}\n}", "func (t *BoundedTable) Truncate(ctx context.Context) error {\n\t// just reset everything.\n\tfor i := int64(0); i < t.capacity; i++ {\n\t\tatomic.StorePointer(&t.records[i], unsafe.Pointer(nil))\n\t}\n\tt.cursor = 0\n\treturn nil\n}", "func (rf *Raft) TruncateLog(lastAppliedIndex int) {\n\trf.mu.Lock()\n\tif lastAppliedIndex <= rf.lastApplied && lastAppliedIndex >= rf.Log.LastIncludedLength {\n\t\trf.Log.LastIncludedTerm = rf.Log.Entries[lastAppliedIndex-rf.Log.LastIncludedLength].Term\n\t\trf.Log.Entries = rf.Log.Entries[(lastAppliedIndex - rf.Log.LastIncludedLength + 1):]\n\t\trf.Log.LastIncludedLength = lastAppliedIndex + 1\n\t}\n\trf.mu.Unlock()\n}", "func (r *walReader) truncate(lastOffset int64) error {\n\tr.logger.Log(\"msg\", \"WAL corruption detected; truncating\",\n\t\t\"err\", r.err, \"file\", r.current().Name(), \"pos\", lastOffset)\n\n\t// Close and delete all files after the current one.\n\tfor _, f := range r.wal.files[r.cur+1:] {\n\t\tif err := f.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Remove(f.Name()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tr.wal.files = r.wal.files[:r.cur+1]\n\n\t// Seek the current 
file to the last valid offset where we continue writing from.\n\t_, err := r.current().Seek(lastOffset, os.SEEK_SET)\n\treturn err\n}", "func (l *Ledger) Truncate(utxovmLastID []byte) error {\n\tl.xlog.Info(\"start truncate ledger\", \"blockid\", utils.F(utxovmLastID))\n\n\t// 获取账本锁\n\tl.mutex.Lock()\n\tdefer l.mutex.Unlock()\n\n\tbatchWrite := l.baseDB.NewBatch()\n\tnewMeta := proto.Clone(l.meta).(*pb.LedgerMeta)\n\tnewMeta.TipBlockid = utxovmLastID\n\n\t// 获取裁剪目标区块信息\n\tblock, err := l.fetchBlock(utxovmLastID)\n\tif err != nil {\n\t\tl.xlog.Warn(\"failed to find utxovm last block\", \"err\", err, \"blockid\", utils.F(utxovmLastID))\n\t\treturn err\n\t}\n\t// 查询分支信息\n\tbranchTips, err := l.GetBranchInfo(block.Blockid, block.Height)\n\tif err != nil {\n\t\tl.xlog.Warn(\"failed to find all branch tips\", \"err\", err)\n\t\treturn err\n\t}\n\n\t// 逐个分支裁剪到目标高度\n\tfor _, branchTip := range branchTips {\n\t\tdeletedBlockid := []byte(branchTip)\n\t\t// 裁剪到目标高度\n\t\terr = l.removeBlocks(deletedBlockid, block.Blockid, batchWrite)\n\t\tif err != nil {\n\t\t\tl.xlog.Warn(\"failed to remove garbage blocks\", \"from\", utils.F(l.meta.TipBlockid),\n\t\t\t\t\"to\", utils.F(block.Blockid))\n\t\t\treturn err\n\t\t}\n\t\t// 更新分支高度信息\n\t\terr = l.updateBranchInfo(block.Blockid, deletedBlockid, block.Height, batchWrite)\n\t\tif err != nil {\n\t\t\tl.xlog.Warn(\"truncate failed when calling updateBranchInfo\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnewMeta.TrunkHeight = block.Height\n\tmetaBuf, err := proto.Marshal(newMeta)\n\tif err != nil {\n\t\tl.xlog.Warn(\"failed to marshal pb meta\")\n\t\treturn err\n\t}\n\tbatchWrite.Put([]byte(pb.MetaTablePrefix), metaBuf)\n\terr = batchWrite.Write()\n\tif err != nil {\n\t\tl.xlog.Warn(\"batch write failed when truncate\", \"err\", err)\n\t\treturn err\n\t}\n\tl.meta = newMeta\n\n\tl.xlog.Info(\"truncate blockid succeed\")\n\treturn nil\n}", "func (t Table) Truncate(ctx context.Context) error {\n\tkeys, err := t.getKeys(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get table keys: %w\", err)\n\t}\n\n\titems, err := t.scan(ctx, keys)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"scan: %w\", err)\n\t}\n\n\tlog.Printf(\"[%s] contains %d items\\n\", t.name, len(items))\n\n\tif err := t.batchDelete(ctx, items); err != nil {\n\t\treturn fmt.Errorf(\"batch delete: %w\", err)\n\t}\n\n\tlog.Printf(\"[%s] complete to truncate table\\n\", t.name)\n\n\treturn nil\n}", "func (rl *RollingLog) trim() {\n\tlastValidTime := time.Now().Add(-rl.keepfor)\n\n\te := rl.entries.Front()\n\tfor e != nil && e.Value.(LogEntry).timestamp.Before(lastValidTime) {\n\t\tnext := e.Next()\n\t\trl.entries.Remove(e)\n\t\te = next\n\t}\n}", "func (c *Chunk) TruncateTo(numRows int) {\n\tfor _, col := range c.columns {\n\t\tif col.isFixed() {\n\t\t\telemLen := len(col.elemBuf)\n\t\t\tcol.data = col.data[:numRows*elemLen]\n\t\t} else {\n\t\t\tcol.data = col.data[:col.offsets[numRows]]\n\t\t\tcol.offsets = col.offsets[:numRows+1]\n\t\t}\n\t\tfor i := numRows; i < col.length; i++ {\n\t\t\tif col.isNull(i) {\n\t\t\t\tcol.nullCount--\n\t\t\t}\n\t\t}\n\t\tcol.length = numRows\n\t\tcol.nullBitmap = col.nullBitmap[:(col.length>>3)+1]\n\t}\n\tc.numVirtualRows = numRows\n}", "func (b *ChangeBuffer) truncateBefore(e list.Element) {\n\tlast := e.Prev()\n\tfor last != nil {\n\t\tif last.Prev() != nil {\n\t\t\tlast = last.Prev()\n\t\t\tb.Remove(last.Next())\n\t\t} else {\n\t\t\tb.Remove(last)\n\t\t\tlast = nil\n\t\t}\n\t}\n}", "func (snapshots EBSSnapshots) TrimHead(n int) EBSSnapshots {\n\tif n > 
len(snapshots) {\n\t\treturn EBSSnapshots{}\n\t}\n\treturn snapshots[n:]\n}", "func resetHead(rjc pb.RoutedJournalClient, journal pb.Journal, done func()) {\n\tdefer done()\n\n\tvar ctx = context.Background()\n\tvar r = client.NewReader(ctx, rjc, pb.ReadRequest{\n\t\tJournal: journal,\n\t\tOffset: -1,\n\t\tBlock: false,\n\t\tMetadataOnly: true,\n\t})\n\tif _, err := r.Read(nil); err != client.ErrOffsetNotYetAvailable {\n\t\tmbp.Must(err, \"failed to read head of journal\", \"journal\", journal)\n\t}\n\t// Issue a zero-byte write at the indexed head.\n\tvar a = client.NewAppender(ctx, rjc, pb.AppendRequest{\n\t\tJournal: journal,\n\t\tOffset: r.Response.Offset,\n\t})\n\tvar err = a.Close()\n\n\tif err == nil {\n\t\tlog.WithField(\"journal\", journal).Info(\"reset write head\")\n\t} else if err == client.ErrWrongAppendOffset {\n\t\tlog.WithField(\"journal\", journal).Info(\"did not reset (raced writes)\")\n\t} else {\n\t\tmbp.Must(err, \"failed to reset journal offset\", \"journal\", journal)\n\t}\n}", "func (seq Sequence) Truncate(width int, resolution time.Duration, asOf time.Time, until time.Time) (result Sequence) {\n\tif len(seq) == 0 {\n\t\treturn nil\n\t}\n\tresult = seq\n\toldUntil := result.Until()\n\tasOf = RoundTimeUntilDown(asOf, resolution, oldUntil)\n\tuntil = RoundTimeUntilDown(until, resolution, oldUntil)\n\n\tif !until.IsZero() {\n\t\tperiodsToRemove := int(oldUntil.Sub(until) / resolution)\n\t\tif periodsToRemove > 0 {\n\t\t\tbytesToRemove := periodsToRemove * width\n\t\t\tif bytesToRemove+Width64bits >= len(seq) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tresult = result[bytesToRemove:]\n\t\t\tresult.SetUntil(until)\n\t\t}\n\t}\n\n\tif !asOf.IsZero() {\n\t\tmaxPeriods := int(result.Until().Sub(asOf) / resolution)\n\t\tif maxPeriods <= 0 {\n\t\t\t// Entire sequence falls outside of truncation range\n\t\t\treturn nil\n\t\t}\n\t\tmaxLength := Width64bits + maxPeriods*width\n\t\tif maxLength >= len(result) {\n\t\t\treturn result\n\t\t}\n\t\treturn result[:maxLength]\n\t}\n\n\treturn result\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ForID returns a lifeline from the bucket with the provided PN and ObjID. If no index exists for that pulse, it falls back to the last known pulse for the object.
func (i *IndexDB) ForID(ctx context.Context, pn insolar.PulseNumber, objID insolar.ID) (record.Index, error) { var buck *record.Index buck, err := i.getBucket(pn, objID) if err == ErrIndexNotFound { lastPN, err := i.getLastKnownPN(objID) if err != nil { return record.Index{}, ErrIndexNotFound } buck, err = i.getBucket(lastPN, objID) if err != nil { return record.Index{}, err } } else if err != nil { return record.Index{}, err } return *buck, nil }
[ "func (s *Storage) GetLifeline(objRef []byte, fromIndex *string, pulseNumberLt, pulseNumberGt, timestampLte, timestampGte *int64, limit, offset int, sortByIndexAsc bool) ([]models.Record, int, error) {\n\ttimer := prometheus.NewTimer(GetLifelineDuration)\n\tdefer timer.ObserveDuration()\n\n\tquery := s.db.Model(&models.Record{}).Where(\"object_reference = ?\", objRef).Where(\"type = ?\", models.State)\n\n\tquery = filterByPulse(query, pulseNumberLt, pulseNumberGt)\n\n\tquery = filterByTimestamp(query, timestampLte, timestampGte)\n\n\tvar err error\n\tif fromIndex != nil {\n\t\tquery, err = filterRecordsByIndex(query, *fromIndex, sortByIndexAsc)\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t}\n\n\tquery = sortRecordsByDirection(query, sortByIndexAsc)\n\n\trecords, total, err := getRecords(query, limit, offset)\n\tif err != nil {\n\t\treturn nil, 0, errors.Wrapf(err, \"error while select records for object %v from db\", objRef)\n\t}\n\treturn records, total, nil\n}", "func hostGetObjectId(objId int32, keyId int32, typeId int32) int32", "func getSkSLIdObjName(id string) string {\n\treturn fmt.Sprintf(\"%s/%s\", skslPrefix, id)\n}", "func (t *targetrunner) httpobjget(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tconfig = cmn.GCO.Get()\n\t\tquery = r.URL.Query()\n\t\tisGFNRequest = cmn.IsParseBool(query.Get(cmn.URLParamIsGFNRequest))\n\t)\n\tapiItems, err := t.checkRESTItems(w, r, 2, false, cmn.Version, cmn.Objects)\n\tif err != nil {\n\t\treturn\n\t}\n\tbucket, objName := apiItems[0], apiItems[1]\n\tstarted := time.Now()\n\tif redirDelta := t.redirectLatency(started, query); redirDelta != 0 {\n\t\tt.statsT.Add(stats.GetRedirLatency, redirDelta)\n\t}\n\trangeOff, rangeLen, err := t.offsetAndLength(query)\n\tif err != nil {\n\t\tt.invalmsghdlr(w, r, err.Error())\n\t\treturn\n\t}\n\tbck, err := newBckFromQuery(bucket, r.URL.Query())\n\tif err != nil {\n\t\tt.invalmsghdlr(w, r, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tlom := &cluster.LOM{T: t, ObjName: objName}\n\tif err = lom.Init(bck.Bck, config); err != nil {\n\t\tif _, ok := err.(*cmn.ErrorRemoteBucketDoesNotExist); ok {\n\t\t\tt.BMDVersionFixup(r, cmn.Bck{}, true /* sleep */)\n\t\t\terr = lom.Init(bck.Bck, config)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.invalmsghdlr(w, r, err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\tgoi := &getObjInfo{\n\t\tstarted: started,\n\t\tt: t,\n\t\tlom: lom,\n\t\tw: w,\n\t\tctx: t.contextWithAuth(r.Header),\n\t\toffset: rangeOff,\n\t\tlength: rangeLen,\n\t\tisGFN: isGFNRequest,\n\t\tchunked: config.Net.HTTP.Chunked,\n\t}\n\tif err, errCode := goi.getObject(); err != nil {\n\t\tif cmn.IsErrConnectionReset(err) {\n\t\t\tglog.Errorf(\"GET %s: %v\", lom, err)\n\t\t} else {\n\t\t\tt.invalmsghdlr(w, r, err.Error(), errCode)\n\t\t}\n\t}\n}", "func (dao *LendingPairDao) GetByLendingID(term uint64, lendingAddress common.Address) (*types.LendingPair, error) {\n\tvar res types.LendingPair\n\tquery := bson.M{\"lendingTokenAddress\": lendingAddress.Hex(), \"term\": strconv.FormatUint(term, 10)}\n\terr := db.GetOne(dao.dbName, dao.collectionName, query, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}", "func LHashFromObj(conn *client.Connection, objRef client.ObjectRef) *LHash {\n\treturn &LHash{\n\t\tConn: conn,\n\t\tObjRef: objRef,\n\t}\n}", "func (t *badgerTableVersion) getObjKey(id, objID []byte) []byte {\n\tprefix := []byte(t.prefix + \"object/\")\n\tprefix = append(prefix, id...)\n\tprefix = append(prefix, '/')\n\n\treturn append(prefix, objID...)\n}", "func (llrb 
*LLRB) ID() string {\n\treturn llrb.name\n}", "func (t *targetrunner) httpobjget(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tnhobj cksumvalue\n\t\tbucket, objname, fqn string\n\t\tuname, errstr, version string\n\t\tsize int64\n\t\tprops *objectProps\n\t\tstarted time.Time\n\t\terrcode int\n\t\tcoldget, vchanged, inNextTier bool\n\t)\n\tstarted = time.Now()\n\tcksumcfg := &ctx.config.Cksum\n\tversioncfg := &ctx.config.Ver\n\tct := t.contextWithAuth(r)\n\tapitems := t.restAPIItems(r.URL.Path, 5)\n\tif apitems = t.checkRestAPI(w, r, apitems, 2, Rversion, Robjects); apitems == nil {\n\t\treturn\n\t}\n\tbucket, objname = apitems[0], apitems[1]\n\tif !t.validatebckname(w, r, bucket) {\n\t\treturn\n\t}\n\toffset, length, readRange, errstr := t.validateOffsetAndLength(r)\n\tif errstr != \"\" {\n\t\tt.invalmsghdlr(w, r, errstr)\n\t\treturn\n\t}\n\n\tbucketmd := t.bmdowner.get()\n\tislocal := bucketmd.islocal(bucket)\n\terrstr, errcode = t.checkLocalQueryParameter(bucket, r, islocal)\n\tif errstr != \"\" {\n\t\tt.invalmsghdlr(w, r, errstr, errcode)\n\t\treturn\n\t}\n\n\t// lockname(ro)\n\tfqn, uname = t.fqn(bucket, objname, islocal), uniquename(bucket, objname)\n\tt.rtnamemap.lockname(uname, false, &pendinginfo{Time: time.Now(), fqn: fqn}, time.Second)\n\n\t// existence, access & versioning\n\tif coldget, size, version, errstr = t.lookupLocally(bucket, objname, fqn); islocal && errstr != \"\" {\n\t\terrcode = http.StatusInternalServerError\n\t\t// given certain conditions (below) make an effort to locate the object cluster-wide\n\t\tif strings.Contains(errstr, doesnotexist) {\n\t\t\terrcode = http.StatusNotFound\n\t\t\taborted, running := t.xactinp.isAbortedOrRunningRebalance()\n\t\t\tif aborted || running {\n\t\t\t\tif props := t.getFromNeighbor(bucket, objname, r, islocal); props != nil {\n\t\t\t\t\tsize, nhobj = props.size, props.nhobj\n\t\t\t\t\tgoto existslocally\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t_, p := bucketmd.get(bucket, islocal)\n\t\t\t\tif p.NextTierURL != \"\" {\n\t\t\t\t\tif inNextTier, errstr, errcode = t.objectInNextTier(p.NextTierURL, bucket, objname); inNextTier {\n\t\t\t\t\t\tprops, errstr, errcode = t.getObjectNextTier(p.NextTierURL, bucket, objname, fqn)\n\t\t\t\t\t\tif errstr == \"\" {\n\t\t\t\t\t\t\tsize, nhobj = props.size, props.nhobj\n\t\t\t\t\t\t\tgoto existslocally\n\t\t\t\t\t\t}\n\t\t\t\t\t\tglog.Errorf(\"Error getting object from next tier after successful lookup, err: %s,\"+\n\t\t\t\t\t\t\t\" HTTP status code: %d\", errstr, errcode)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tt.invalmsghdlr(w, r, errstr, errcode)\n\t\tt.rtnamemap.unlockname(uname, false)\n\t\treturn\n\t}\n\n\tif !coldget && !islocal {\n\t\tif versioncfg.ValidateWarmGet && (version != \"\" &&\n\t\t\tt.versioningConfigured(bucket)) {\n\t\t\tif vchanged, errstr, errcode = t.checkCloudVersion(\n\t\t\t\tct, bucket, objname, version); errstr != \"\" {\n\t\t\t\tt.invalmsghdlr(w, r, errstr, errcode)\n\t\t\t\tt.rtnamemap.unlockname(uname, false)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// TODO: add a knob to return what's cached while upgrading the version async\n\t\t\tcoldget = vchanged\n\t\t}\n\t}\n\tif !coldget && cksumcfg.ValidateWarmGet && cksumcfg.Checksum != ChecksumNone {\n\t\tvalidChecksum, errstr := t.validateObjectChecksum(fqn, cksumcfg.Checksum, size)\n\t\tif errstr != \"\" {\n\t\t\tt.invalmsghdlr(w, r, errstr, http.StatusInternalServerError)\n\t\t\tt.rtnamemap.unlockname(uname, false)\n\t\t\treturn\n\t\t}\n\t\tif !validChecksum {\n\t\t\tif islocal {\n\t\t\t\tif err := os.Remove(fqn); err != 
nil {\n\t\t\t\t\tglog.Warningf(\"Bad checksum, failed to remove %s/%s, err: %v\", bucket, objname, err)\n\t\t\t\t}\n\t\t\t\tt.invalmsghdlr(w, r, fmt.Sprintf(\"Bad checksum %s/%s\", bucket, objname), http.StatusInternalServerError)\n\t\t\t\tt.rtnamemap.unlockname(uname, false)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcoldget = true\n\t\t}\n\t}\n\tif coldget {\n\t\tt.rtnamemap.unlockname(uname, false)\n\t\tif props, errstr, errcode = t.coldget(ct, bucket, objname, false); errstr != \"\" {\n\t\t\tif errcode == 0 {\n\t\t\t\tt.invalmsghdlr(w, r, errstr)\n\t\t\t} else {\n\t\t\t\tt.invalmsghdlr(w, r, errstr, errcode)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tsize, nhobj = props.size, props.nhobj\n\t}\n\nexistslocally:\n\t// note: coldget() keeps the read lock if successful\n\tdefer t.rtnamemap.unlockname(uname, false)\n\n\t//\n\t// local file => http response\n\t//\n\tif size == 0 {\n\t\tglog.Warningf(\"Unexpected: object %s/%s size is 0 (zero)\", bucket, objname)\n\t}\n\treturnRangeChecksum := readRange && cksumcfg.EnableReadRangeChecksum\n\tif !coldget && !returnRangeChecksum && cksumcfg.Checksum != ChecksumNone {\n\t\thashbinary, errstr := Getxattr(fqn, XattrXXHashVal)\n\t\tif errstr == \"\" && hashbinary != nil {\n\t\t\tnhobj = newcksumvalue(cksumcfg.Checksum, string(hashbinary))\n\t\t}\n\t}\n\tif nhobj != nil && !returnRangeChecksum {\n\t\thtype, hval := nhobj.get()\n\t\tw.Header().Add(HeaderDfcChecksumType, htype)\n\t\tw.Header().Add(HeaderDfcChecksumVal, hval)\n\t}\n\tif props != nil && props.version != \"\" {\n\t\tw.Header().Add(HeaderDfcObjVersion, props.version)\n\t}\n\n\tfile, err := os.Open(fqn)\n\tif err != nil {\n\t\tif os.IsPermission(err) {\n\t\t\terrstr = fmt.Sprintf(\"Permission denied: access forbidden to %s\", fqn)\n\t\t\tt.invalmsghdlr(w, r, errstr, http.StatusForbidden)\n\t\t} else {\n\t\t\terrstr = fmt.Sprintf(\"Failed to open local file %s, err: %v\", fqn, err)\n\t\t\tt.invalmsghdlr(w, r, errstr, http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\n\tdefer file.Close()\n\tif readRange {\n\t\tsize = length\n\t}\n\tslab := selectslab(size)\n\tbuf := slab.alloc()\n\tdefer slab.free(buf)\n\n\tif cksumcfg.Checksum != ChecksumNone && returnRangeChecksum {\n\t\tslab := selectslab(length)\n\t\tbuf := slab.alloc()\n\t\treader := io.NewSectionReader(file, offset, length)\n\t\txxhashval, errstr := ComputeXXHash(reader, buf, xxhash.New64())\n\t\tslab.free(buf)\n\t\tif errstr != \"\" {\n\t\t\ts := fmt.Sprintf(\"Unable to compute checksum for byte range, offset:%d, length:%d from %s, err: %s\", offset, length, fqn, errstr)\n\t\t\tt.invalmsghdlr(w, r, s, http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Add(HeaderDfcChecksumType, cksumcfg.Checksum)\n\t\tw.Header().Add(HeaderDfcChecksumVal, xxhashval)\n\t}\n\n\tvar written int64\n\tif readRange {\n\t\treader := io.NewSectionReader(file, offset, length)\n\t\twritten, err = io.CopyBuffer(w, reader, buf)\n\t} else {\n\t\t// copy\n\t\twritten, err = io.CopyBuffer(w, file, buf)\n\t}\n\tif err != nil {\n\t\terrstr = fmt.Sprintf(\"Failed to send file %s, err: %v\", fqn, err)\n\t\tglog.Errorln(t.errHTTP(r, errstr, http.StatusInternalServerError))\n\t\tt.statsif.add(\"numerr\", 1)\n\t\treturn\n\t}\n\tif !coldget {\n\t\tgetatimerunner().touch(fqn)\n\t}\n\tif glog.V(4) {\n\t\ts := fmt.Sprintf(\"GET: %s/%s, %.2f MB, %d µs\", bucket, objname, float64(written)/MiB, time.Since(started)/1000)\n\t\tif coldget {\n\t\t\ts += \" (cold)\"\n\t\t}\n\t\tglog.Infoln(s)\n\t}\n\n\tdelta := 
time.Since(started)\n\tt.statsdC.Send(\"get\",\n\t\tstatsd.Metric{\n\t\t\tType: statsd.Counter,\n\t\t\tName: \"count\",\n\t\t\tValue: 1,\n\t\t},\n\t\tstatsd.Metric{\n\t\t\tType: statsd.Timer,\n\t\t\tName: \"latency\",\n\t\t\tValue: float64(delta / time.Millisecond),\n\t\t},\n\t)\n\n\tt.statsif.addMany(\"numget\", int64(1), \"getlatency\", int64(delta/1000))\n}", "func (r *RecordDB) ForID(ctx context.Context, id insolar.ID) (record.Material, error) {\n\treturn r.get(id)\n}", "func (s Store) GetFromID(id string) (l *Lease) {\n\tnewl := &Lease{}\n\ti, err := strconv.ParseInt(id, 10, 64)\n\tif err != nil {\n\t\tlogger.Error(\"Id conversion error\", err)\n\t\treturn nil\n\t}\n\tnewl, _ = s.leases.ID(i)\n\treturn newl\n}", "func (o LakeOutput) LakeId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Lake) pulumi.StringOutput { return v.LakeId }).(pulumi.StringOutput)\n}", "func NewObjectID(id string) GrapheneObject {\n\tgid := new(ObjectID)\n\tif err := gid.Parse(id); err != nil {\n\t\tlogging.Errorf(\n\t\t\t\"ObjectID parser error %v\",\n\t\t\terrors.Annotate(err, \"Parse\"),\n\t\t)\n\t\treturn nil\n\t}\n\n\treturn gid\n}", "func (me *contentDirectoryService) objectFromID(id string) (o object, err error) {\n\to.Path, err = url.QueryUnescape(id)\n\tif err != nil {\n\t\treturn\n\t}\n\tif o.Path == \"0\" {\n\t\to.Path = \"/\"\n\t}\n\t// o.Path = path.Clean(o.Path)\n\t// if !path.IsAbs(o.Path) {\n\t// \terr = fmt.Errorf(\"bad ObjectID %v\", o.Path)\n\t// \treturn\n\t// }\n\to.RootObjectPath = me.RootObjectPath\n\n\treturn\n}", "func (o *GetInterceptionitemsParams) SetLiid(liid string) {\n\to.Liid = liid\n}", "func (s *service) GetJobSeekerByID(ID string) (JobSeekerFormat, error) {\n\t// validate input ID is not negative number\n\tif err := helper.ValidateIDNumber(ID); err != nil {\n\t\treturn JobSeekerFormat{}, err\n\t}\n\n\tjobSeeker, err := s.repository.FindByID(ID)\n\n\tif err != nil {\n\t\treturn JobSeekerFormat{}, err\n\t}\n\n\tif jobSeeker.ID == 0 {\n\t\treturn JobSeekerFormat{}, errors.New(\"job seeker id not found\")\n\t}\n\tformatJobSeeker := FormatJobSeeker(jobSeeker)\n\treturn formatJobSeeker, nil\n}", "func LoadByJobRunID(ctx context.Context, m *gorpmapper.Mapper, db gorp.SqlExecutor, jobRunId int64, itemTypes []string, opts ...gorpmapper.GetOptionFunc) ([]sdk.CDNItem, error) {\n\tquery := gorpmapper.NewQuery(`\n\t\tSELECT *\n\t\tFROM item\n\t\tWHERE api_ref->>'node_run_job_id' = $1\n\t\tAND type = ANY($2)\n\t\tAND to_delete = false\n\t\tORDER BY created DESC\n\t`).Args(strconv.FormatInt(jobRunId, 10), pq.StringArray(itemTypes))\n\treturn getItems(ctx, m, db, query, opts...)\n}", "func (*GetGLAByIdRq) Descriptor() ([]byte, []int) {\n\treturn file_account_proto_rawDescGZIP(), []int{1}\n}", "func (db *DB) GetLadderFromHashId(hashId string) (Ladder, error) {\n\t// find ladder from its hash\n\tsqlStatement := \"SELECT id, name, owner, hashid, method FROM ladders WHERE hashid = $1\"\n\trow := db.QueryRow(sqlStatement, hashId)\n\tvar ladder Ladder\n\terr := row.Scan(&ladder.Id, &ladder.Name, &ladder.Owner, &ladder.HashId, &ladder.Method)\n\tif err != nil {\n\t\treturn Ladder{}, err\n\t}\n\treturn ladder, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WaitReady waits for machinecontroller and its webhook to become ready
func WaitReady(s *state.State) error {
	if !s.Cluster.MachineController.Deploy {
		return nil
	}

	s.Logger.Infoln("Waiting for machine-controller to come up...")

	if err := cleanupStaleResources(s.Context, s.DynamicClient); err != nil {
		return err
	}

	if err := waitForWebhook(s.Context, s.DynamicClient); err != nil {
		return err
	}

	if err := waitForMachineController(s.Context, s.DynamicClient); err != nil {
		return err
	}

	return waitForCRDs(s)
}
[ "func WaitReady(ctx *util.Context) error {\n\tif !ctx.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\tctx.Logger.Infoln(\"Waiting for machine-controller to come up…\")\n\n\t// Wait a bit to let scheduler to react\n\ttime.Sleep(10 * time.Second)\n\n\tif err := WaitForWebhook(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller-webhook did not come up\")\n\t}\n\n\tif err := WaitForMachineController(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller did not come up\")\n\t}\n\treturn nil\n}", "func waitForWebhook(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerWebhookName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller webhook to became ready\")\n}", "func waitForMachineController(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller to became ready\")\n}", "func WaitForReady() {\n\tinstance.WaitForReady()\n}", "func waitForWebhook(ctx context.Context, log *zap.SugaredLogger, client ctrlruntimeclient.Client, kubermaticNamespace string) error {\n\t// wait for the webhook to be ready\n\ttimeout := 30 * time.Second\n\tendpoint := types.NamespacedName{Namespace: kubermaticNamespace, Name: \"seed-webhook\"}\n\n\tlog.Infow(\"waiting for webhook to be ready...\", \"webhook\", endpoint, \"timeout\", timeout)\n\tif err := wait.Poll(500*time.Millisecond, timeout, func() (bool, error) {\n\t\tendpoints := &corev1.Endpoints{}\n\t\tif err := client.Get(ctx, endpoint, endpoints); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn len(endpoints.Subsets) > 0, nil\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for webhook: %v\", err)\n\t}\n\tlog.Info(\"webhook is ready\")\n\n\treturn nil\n}", "func WaitForReady() {\n\tdefaultClient.WaitForReady()\n}", "func (c *Client) WaitUntilReady() {\n\tc.waitUntilReady()\n}", "func (envManager *TestEnvManager) WaitUntilReady() (bool, error) {\n\tlog.Println(\"Start checking components' status\")\n\tretry := u.Retrier{\n\t\tBaseDelay: 1 * time.Second,\n\t\tMaxDelay: 10 * time.Second,\n\t\tRetries: 8,\n\t}\n\n\tready := false\n\tretryFn := func(_ context.Context, i int) error {\n\t\tfor _, comp := range envManager.testEnv.GetComponents() {\n\t\t\tif alive, err := comp.IsAlive(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to comfirm compoment %s is alive %v\", comp.GetName(), err)\n\t\t\t} else if !alive {\n\t\t\t\treturn fmt.Errorf(\"component %s is not alive\", comp.GetName())\n\t\t\t}\n\t\t}\n\n\t\tready = true\n\t\tlog.Println(\"All components are ready\")\n\t\treturn nil\n\t}\n\n\t_, err := retry.Retry(context.Background(), retryFn)\n\treturn ready, err\n}", "func WaitForReady() {\n\tucc.WaitForReady()\n}", "func (s *Server) WaitUntilReady() {\n\t_, _ = <-s.readyCh\n}", "func (w 
*waitForReadyConfig) waitForReadyCondition(ctx context.Context, name string, initialVersion string, start time.Time,\n\ttimeoutTimer *time.Timer, errorWindow time.Duration, options Options, msgCallback MessageCallback) (retry bool, timeoutReached bool, err error) {\n\n\twatcher, err := w.watchMaker(ctx, name, initialVersion, options.timeoutWithDefault())\n\tif err != nil {\n\t\treturn false, false, err\n\t}\n\tdefer watcher.Stop()\n\t// channel used to transport the error that has been received\n\terrChan := make(chan error)\n\n\tvar errorTimer *time.Timer\n\t// Stop error timer if it has been started because of\n\t// a ConditionReady has been set to false\n\tdefer (func() {\n\t\tif errorTimer != nil {\n\t\t\terrorTimer.Stop()\n\t\t\terrorTimer = nil\n\t\t}\n\t})()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn false, false, ctx.Err()\n\t\tcase <-timeoutTimer.C:\n\t\t\t// We reached a timeout without receiving a \"Ready\" == \"True\" event\n\t\t\treturn false, true, nil\n\t\tcase err = <-errChan:\n\t\t\t// The error timer fired and we have not received a recovery event (\"True\" / \"Unknown\") in the\n\t\t\t// meantime. So the error status is considered to be final.\n\t\t\treturn false, false, err\n\t\tcase event, ok := <-watcher.ResultChan():\n\t\t\tif !ok || event.Object == nil {\n\t\t\t\treturn true, false, nil\n\t\t\t}\n\n\t\t\t// Skip event if its not a MODIFIED event, as only MODIFIED events update the condition\n\t\t\t// we are looking for.\n\t\t\t// This will filter out all synthetic ADDED events that created bt the API server for\n\t\t\t// the initial state. See https://kubernetes.io/docs/reference/using-api/api-concepts/#the-resourceversion-parameter\n\t\t\t// for details:\n\t\t\t// \"Get State and Start at Most Recent: Start a watch at the most recent resource version,\n\t\t\t// which must be consistent (i.e. served from etcd via a quorum read). To establish initial state,\n\t\t\t// the watch begins with synthetic “Added” events of all resources instances that exist at the starting\n\t\t\t// resource version. All following watch events are for all changes that occurred after the resource\n\t\t\t// version the watch started at.\"\n\t\t\tif event.Type != watch.Modified {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Check whether resource is in sync already (meta.generation == status.observedGeneration)\n\t\t\tinSync, err := generationCheck(event.Object)\n\t\t\tif err != nil {\n\t\t\t\treturn false, false, err\n\t\t\t}\n\n\t\t\t// Skip events if generations has not yet been consolidated, regardless of type.\n\t\t\t// Wait for the next event to come in until the generations align\n\t\t\tif !inSync {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tconditions, err := w.conditionsExtractor(event.Object)\n\t\t\tif err != nil {\n\t\t\t\treturn false, false, err\n\t\t\t}\n\t\t\tfor _, cond := range conditions {\n\t\t\t\tif cond.Type == apis.ConditionReady {\n\t\t\t\t\tswitch cond.Status {\n\t\t\t\t\tcase corev1.ConditionTrue:\n\t\t\t\t\t\t// Any error timer running will be cancelled by the defer method that has been set above\n\t\t\t\t\t\treturn false, false, nil\n\t\t\t\t\tcase corev1.ConditionFalse:\n\t\t\t\t\t\t// Fire up a timer waiting for the error window duration to still allow to reconcile\n\t\t\t\t\t\t// to a true condition even after the condition went to false. 
If this is not the case within\n\t\t\t\t\t\t// this window, then an error is returned.\n\t\t\t\t\t\t// If there is already a timer running, we just log.\n\t\t\t\t\t\tif errorTimer == nil {\n\t\t\t\t\t\t\terr := fmt.Errorf(\"%s: %s\", cond.Reason, cond.Message)\n\t\t\t\t\t\t\terrorTimer = time.AfterFunc(errorWindow, func() {\n\t\t\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t}\n\t\t\t\t\tcase corev1.ConditionUnknown:\n\t\t\t\t\t\t// If an errorTimer is triggered because of a previous \"False\" event, but now\n\t\t\t\t\t\t// we received an \"Unknown\" event during the error window, cancel the error timer\n\t\t\t\t\t\t// to avoid to receive an error signal.\n\t\t\t\t\t\tif errorTimer != nil {\n\t\t\t\t\t\t\terrorTimer.Stop()\n\t\t\t\t\t\t\terrorTimer = nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif cond.Message != \"\" {\n\t\t\t\t\t\tmsgCallback(time.Since(start), cond.Message)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (a *VppAdapter) WaitReady() error {\n\treturn nil\n}", "func waitReady(project, name, region string) error {\n\twait := time.Minute * 4\n\tdeadline := time.Now().Add(wait)\n\tfor time.Now().Before(deadline) {\n\t\tsvc, err := getService(project, name, region)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to query Service for readiness: %w\", err)\n\t\t}\n\n\t\tfor _, cond := range svc.Status.Conditions {\n\t\t\tif cond.Type == \"Ready\" {\n\t\t\t\tif cond.Status == \"True\" {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if cond.Status == \"False\" {\n\t\t\t\t\treturn fmt.Errorf(\"reason=%s message=%s\", cond.Reason, cond.Message)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t}\n\treturn fmt.Errorf(\"the service did not become ready in %s, check Cloud Console for logs to see why it failed\", wait)\n}", "func (a *Agent) WaitReady() {\n\ta.statusLock.RLock()\n\tdefer a.statusLock.RUnlock()\n\n\tfor {\n\t\tif a.status == 1 {\n\t\t\treturn\n\t\t}\n\t\ta.statusCond.Wait()\n\t}\n}", "func (p *Pebble) WaitReady(t *testing.T) {\n\tif p.pebbleCMD.Process == nil {\n\t\tt.Fatal(\"Pebble not started\")\n\t}\n\turl := p.DirectoryURL()\n\tRetry(t, 10, 10*time.Millisecond, func() error {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\tdefer cancel()\n\n\t\tt.Log(\"Checking pebble readiness\")\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp, err := p.httpClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresp.Body.Close()\n\t\treturn nil\n\t})\n}", "func WaitReady() {\n\tif deviceReady {\n\t\treturn\n\t}\n\tch := make(chan struct{}, 0)\n\tf := func() {\n\t\tdeviceReady = true\n\t\tclose(ch)\n\t}\n\tOnDeviceReady(f)\n\t<-ch\n\tUnDeviceReady(f)\n}", "func (s *S8Proxy) WaitUntilClientIsReady() {\n\ts.gtpClient.WaitUntilClientIsReady(0)\n}", "func (m *DomainMonitor) WaitReady() (err error) {\n\treturn m.sink.WaitReady()\n}", "func WaitReady(obc *nbv1.ObjectBucketClaim) bool {\n\tlog := util.Logger()\n\tklient := util.KubeClient()\n\n\tinterval := time.Duration(3)\n\n\terr := wait.PollUntilContextCancel(ctx,interval*time.Second, true, func(ctx context.Context) (bool, error) {\n\t\terr := klient.Get(util.Context(), util.ObjectKey(obc), obc)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"⏳ Failed to get OBC: %s\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tCheckPhase(obc)\n\t\tif obc.Status.Phase == obv1.ObjectBucketClaimStatusPhaseFailed {\n\t\t\treturn false, fmt.Errorf(\"ObjectBucketClaimStatusPhaseFailed\")\n\t\t}\n\t\tif 
obc.Status.Phase != obv1.ObjectBucketClaimStatusPhaseBound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\treturn (err == nil)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
waitForCRDs waits for machinecontroller CRDs to be created and become established
func waitForCRDs(s *state.State) error {
	condFn := clientutil.CRDsReadyCondition(s.Context, s.DynamicClient, CRDNames())
	err := wait.PollUntilContextTimeout(s.Context, 5*time.Second, 3*time.Minute, false, condFn.WithContext())

	return fail.KubeClient(err, "waiting for machine-controller CRDs to became ready")
}
[ "func (m *MeshReconciler) waitForCRD(name string, client runtimeclient.Client) error {\n\tm.logger.WithField(\"name\", name).Debug(\"waiting for CRD\")\n\n\tbackoffConfig := backoff.ConstantBackoffConfig{\n\t\tDelay: time.Duration(backoffDelaySeconds) * time.Second,\n\t\tMaxRetries: backoffMaxretries,\n\t}\n\tbackoffPolicy := backoff.NewConstantBackoffPolicy(backoffConfig)\n\n\tvar crd apiextensionsv1beta1.CustomResourceDefinition\n\terr := backoff.Retry(func() error {\n\t\terr := client.Get(context.Background(), types.NamespacedName{\n\t\t\tName: name,\n\t\t}, &crd)\n\t\tif err != nil {\n\t\t\treturn errors.WrapIf(err, \"could not get CRD\")\n\t\t}\n\n\t\tfor _, condition := range crd.Status.Conditions {\n\t\t\tif condition.Type == apiextensionsv1beta1.Established {\n\t\t\t\tif condition.Status == apiextensionsv1beta1.ConditionTrue {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn errors.New(\"CRD is not established yet\")\n\t}, backoffPolicy)\n\n\treturn err\n}", "func waitForCRDEstablishment(clientset apiextensionsclient.Interface) error {\n\treturn wait.Poll(500*time.Millisecond, 60*time.Second, func() (bool, error) {\n\t\tsparkAppCrd, err := getCRD(clientset)\n\t\tfor _, cond := range sparkAppCrd.Status.Conditions {\n\t\t\tswitch cond.Type {\n\t\t\tcase apiextensionsv1beta1.Established:\n\t\t\t\tif cond.Status == apiextensionsv1beta1.ConditionTrue {\n\t\t\t\t\treturn true, err\n\t\t\t\t}\n\t\t\tcase apiextensionsv1beta1.NamesAccepted:\n\t\t\t\tif cond.Status == apiextensionsv1beta1.ConditionFalse {\n\t\t\t\t\tfmt.Printf(\"Name conflict: %v\\n\", cond.Reason)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false, err\n\t})\n}", "func (c *Controller) WaitForDiskCRD() {\n\tfor {\n\t\t_, err := c.ListDiskResource()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Disk CRD is not available yet. 
Retrying after %v, error: %v\", CRDRetryInterval, err)\n\t\t\ttime.Sleep(CRDRetryInterval)\n\t\t\tcontinue\n\t\t}\n\t\tglog.Info(\"Disk CRD is available\")\n\t\tbreak\n\t}\n}", "func (m *MeshReconciler) waitForCRD(name string, client runtimeclient.Client) error {\n\tm.logger.WithField(\"name\", name).Debug(\"waiting for CRD\")\n\n\tvar backoffConfig = backoff.ConstantBackoffConfig{\n\t\tDelay: time.Duration(backoffDelaySeconds) * time.Second,\n\t\tMaxRetries: backoffMaxretries,\n\t}\n\tvar backoffPolicy = backoff.NewConstantBackoffPolicy(backoffConfig)\n\n\tvar crd apiextensionsv1beta1.CustomResourceDefinition\n\terr := backoff.Retry(func() error {\n\t\terr := client.Get(context.Background(), types.NamespacedName{\n\t\t\tName: name,\n\t\t}, &crd)\n\t\tif err != nil {\n\t\t\treturn errors.WrapIf(err, \"could not get CRD\")\n\t\t}\n\n\t\treturn nil\n\t}, backoffPolicy)\n\n\treturn err\n}", "func waitForEstablishedCRD(client clientset.Interface, name string) error {\n\treturn wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {\n\t\tcrd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, cond := range crd.Status.Conditions {\n\t\t\tswitch cond.Type {\n\t\t\tcase apiextensionsv1beta1.Established:\n\t\t\t\tif cond.Status == apiextensionsv1beta1.ConditionTrue {\n\t\t\t\t\treturn true, err\n\t\t\t\t}\n\t\t\tcase apiextensionsv1beta1.NamesAccepted:\n\t\t\t\tif cond.Status == apiextensionsv1beta1.ConditionFalse {\n\t\t\t\t\tglog.Infof(\"Name conflict: %v\\n\", cond.Reason)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t})\n}", "func waitForCRDCreated(clientset *fake.Clientset, CRDName string) error {\n\treturn wait.Poll(50*time.Millisecond, 5*time.Second, func() (bool, error) {\n\t\t_, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(CRDName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, err\n\t})\n}", "func waitForKdc() {\n\tfor {\n\t\tresp, err := bash.Run(\"check\", []string{keyDir})\n\t\tif err != nil || resp != 0 {\n\t\t\tif resp != 0 {\n\t\t\t\tfmt.Println(\"KDC is not yet available. 
Shell return code is \" + strconv.Itoa(resp))\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"KDC is not yet available \" + err.Error())\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (rcc *rotateCertsCmd) waitForControlPlaneReadiness() error {\n\tlog.Info(\"Checking health of control plane components\")\n\tpods := make([]string, 0)\n\tfor _, n := range rcc.cs.Properties.GetMasterVMNameList() {\n\t\tfor _, c := range []string{kubeAddonManager, kubeAPIServer, kubeControllerManager, kubeScheduler} {\n\t\t\tpods = append(pods, fmt.Sprintf(\"%s-%s\", c, n))\n\t\t}\n\t}\n\tif err := ops.WaitForReady(rcc.kubeClient, metav1.NamespaceSystem, pods, rotateCertsDefaultInterval, rotateCertsDefaultTimeout, rcc.nodes); err != nil {\n\t\treturn errors.Wrap(err, \"waiting for control plane containers to reach the Ready state within the timeout period\")\n\t}\n\treturn nil\n}", "func CreateCRD(context Context, resources []CustomResource) error {\n\tvar lastErr error\n\tfor _, resource := range resources {\n\t\terr := createCRD(context, resource)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t}\n\t}\n\n\tfor _, resource := range resources {\n\t\tif err := waitForCRDInit(context, resource); err != nil {\n\t\t\tlastErr = err\n\t\t}\n\t}\n\n\treturn lastErr\n}", "func WaitForCRDEstablished(clientset apiextensions.Interface, crdName string) error {\n\treturn wait.Poll(250*time.Millisecond, 30*time.Second, func() (bool, error) {\n\t\tcrd, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crdName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tfor _, cond := range crd.Status.Conditions {\n\t\t\tswitch cond.Type {\n\t\t\tcase apiextensionsv1.Established:\n\t\t\t\tif cond.Status == apiextensionsv1.ConditionTrue {\n\t\t\t\t\treturn true, err\n\t\t\t\t}\n\t\t\tcase apiextensionsv1.NamesAccepted:\n\t\t\t\tif cond.Status == apiextensionsv1.ConditionFalse {\n\t\t\t\t\tlogrus.WithField(\"reason\", cond.Reason).Warn(\"Name conflict\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false, err\n\t})\n}", "func waitForMachineController(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller to became ready\")\n}", "func EnsureCRDs(mgr manager.Manager, cohFlags *flags.CoherenceOperatorFlags, log logr.Logger) error {\n\t// Create the CRD client\n\tc, err := apiextensions.NewForConfig(mgr.GetConfig())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcrdClient := c.ApiextensionsV1beta1().CustomResourceDefinitions()\n\n\treturn EnsureCRDsUsingClient(mgr, cohFlags, log, crdClient)\n}", "func waitForConductor(ctx context.Context, client *gophercloud.ServiceClient) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tlog.Printf(\"[DEBUG] Waiting for conductor API to become available...\")\n\t\t\tdriverCount := 0\n\n\t\t\tdrivers.ListDrivers(client, drivers.ListDriversOpts{\n\t\t\t\tDetail: false,\n\t\t\t}).EachPage(func(page pagination.Page) (bool, error) {\n\t\t\t\tactual, err := drivers.ExtractDrivers(page)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tdriverCount += 
len(actual)\n\t\t\t\treturn true, nil\n\t\t\t})\n\t\t\t// If we have any drivers, conductor is up.\n\t\t\tif driverCount > 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n}", "func waitUntilRDSClusterCreated(rdsClientSess *rds.RDS, restoreParams map[string]string) error {\n\trdsClusterName := restoreParams[\"restoreRDS\"]\n\n\tmaxWaitAttempts := 120\n\n\tinput := &rds.DescribeDBClustersInput{\n\t\tDBClusterIdentifier: aws.String(rdsClusterName),\n\t}\n\n\tfmt.Printf(\"Wait until RDS cluster [%v] is fully created ...\\n\", rdsClusterName)\n\n\tstart := time.Now()\n\n\t// Check until created\n\tfor waitAttempt := 0; waitAttempt < maxWaitAttempts; waitAttempt++ {\n\t\telapsedTime := time.Since(start)\n\t\tif waitAttempt > 0 {\n\t\t\tformattedTime := strings.Split(fmt.Sprintf(\"%6v\", elapsedTime), \".\")\n\t\t\tfmt.Printf(\"Cluster creation elapsed time: %vs\\n\", formattedTime[0])\n\t\t}\n\n\t\tresp, err := rdsClientSess.DescribeDBClusters(input)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Wait RDS cluster creation err %v\", err)\n\t\t}\n\n\t\tfmt.Printf(\"Cluster status: [%s]\\n\", *resp.DBClusters[0].Status)\n\t\tif *resp.DBClusters[0].Status == \"available\" {\n\t\t\tfmt.Printf(\"RDS cluster [%v] created successfully\\n\", rdsClusterName)\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(30 * time.Second)\n\t}\n\treturn fmt.Errorf(\"Aurora Cluster [%v] is not ready, exceed max wait attemps\\n\", rdsClusterName)\n}", "func waitForPods(cs *framework.ClientSet, expectedTotal int) error {\n\terr := wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {\n\t\tguardPods, err := cs.CoreV1Interface.Pods(\"openshift-etcd\").List(context.TODO(), metav1.ListOptions{\n\t\t\tLabelSelector: guardPodsLabelSelectorString,\n\t\t})\n\t\tif err != nil {\n\t\t\tfmt.Printf(\" error listing etcd guard pods: %v\\n\", err)\n\t\t\treturn true, err\n\t\t}\n\t\tnumGuardPods := len(guardPods.Items)\n\t\tif numGuardPods == 0 {\n\t\t\tfmt.Println(\" no guard pods found\")\n\t\t\treturn false, nil\n\t\t}\n\t\tnumReadyPods := countReadyPods(guardPods.Items)\n\t\tif numReadyPods == expectedTotal {\n\t\t\tfmt.Printf(\" %d ready etcd guard pods found! 
\\n\", numReadyPods)\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func waitForCP(restConfig *rest.Config, clustername string) (bool, error) {\n\tlog.Info(\"Waiting for the Control Plane to appear\")\n\t// Set the vars we need\n\tcpname := clustername + \"-control-plane\"\n\tvar expectedCPReplicas int32 = 3\n\n\t// We need to load the scheme since it's not part of the core API\n\tscheme := runtime.NewScheme()\n\t_ = kcpv1.AddToScheme(scheme)\n\n\tc, err := client.New(restConfig, client.Options{\n\t\tScheme: scheme,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// wait up until 20 minutes\n\tcounter := 0\n\tfor runs := 20; counter <= runs; counter++ {\n\t\tif counter > runs {\n\t\t\treturn false, errors.New(\"control-plane did not come up after 10 minutes\")\n\t\t}\n\t\t// get the current status, wait for 3 CP nodes\n\t\tkcp := &kcpv1.KubeadmControlPlane{}\n\t\tif err := c.Get(context.TODO(), client.ObjectKey{Namespace: \"default\", Name: cpname}, kcp); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif kcp.Status.Replicas == expectedCPReplicas {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(time.Minute)\n\n\t}\n\n\treturn true, nil\n}", "func waitForInit() error {\n\tstart := time.Now()\n\tmaxEnd := start.Add(time.Minute)\n\tfor {\n\t\t// Check for existence of vpcCniInitDonePath\n\t\tif _, err := os.Stat(vpcCniInitDonePath); err == nil {\n\t\t\t// Delete the done file in case of a reboot of the node or restart of the container (force init container to run again)\n\t\t\tif err := os.Remove(vpcCniInitDonePath); err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// If file deletion fails, log and allow retry\n\t\t\tlog.Errorf(\"Failed to delete file: %s\", vpcCniInitDonePath)\n\t\t}\n\t\tif time.Now().After(maxEnd) {\n\t\t\treturn errors.Errorf(\"time exceeded\")\n\t\t}\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}", "func (r *CRDRegistry) RegisterCRDs() error {\n\tfor _, crd := range crds {\n\t\t// create the CustomResourceDefinition in the api\n\t\tif err := r.createCRD(crd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// wait for the CustomResourceDefinition to be established\n\t\tif err := r.awaitCRD(crd, watchTimeout); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func WaitAllReady() {\n\tlog := util.Logger()\n\tklient := util.KubeClient()\n\tcrds := LoadCrds()\n\tintervalSec := time.Duration(3)\n\tlist := []*CrdType{\n\t\tcrds.NooBaa, crds.BackingStore, crds.BucketClass,\n\t}\n\tutil.Panic(wait.PollImmediateInfinite(intervalSec*time.Second, func() (bool, error) {\n\t\tallReady := true\n\t\tfor _, crd := range list {\n\t\t\terr := klient.Get(util.Context(), client.ObjectKey{Name: crd.Name}, crd)\n\t\t\tutil.Panic(err)\n\t\t\tready, err := IsReady(crd)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"❌ %s\", err)\n\t\t\t\tallReady = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !ready {\n\t\t\t\tlog.Printf(\"❌ CRD is not ready. Need to wait ...\")\n\t\t\t\tallReady = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn allReady, nil\n\t}))\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DestroyWorkers destroys all MachineDeployment, MachineSet and Machine objects
func DestroyWorkers(s *state.State) error {
	if !s.Cluster.MachineController.Deploy {
		s.Logger.Info("Skipping deleting workers because machine-controller is disabled in configuration.")

		return nil
	}
	if s.DynamicClient == nil {
		return fail.NoKubeClient()
	}

	ctx := context.Background()

	// Annotate nodes with kubermatic.io/skip-eviction=true to skip eviction
	s.Logger.Info("Annotating nodes to skip eviction...")
	nodes := &corev1.NodeList{}
	if err := s.DynamicClient.List(ctx, nodes); err != nil {
		return fail.KubeClient(err, "listing Nodes")
	}

	for _, node := range nodes.Items {
		nodeKey := dynclient.ObjectKey{Name: node.Name}

		retErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
			n := corev1.Node{}
			if err := s.DynamicClient.Get(ctx, nodeKey, &n); err != nil {
				return fail.KubeClient(err, "getting %T %s", n, nodeKey)
			}

			if n.Annotations == nil {
				n.Annotations = map[string]string{}
			}
			n.Annotations["kubermatic.io/skip-eviction"] = "true"

			return fail.KubeClient(s.DynamicClient.Update(ctx, &n), "updating %T %s", n, nodeKey)
		})

		if retErr != nil {
			return retErr
		}
	}

	// Delete all MachineDeployment objects
	s.Logger.Info("Deleting MachineDeployment objects...")
	mdList := &clusterv1alpha1.MachineDeploymentList{}
	if err := s.DynamicClient.List(ctx, mdList, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {
		if !errorsutil.IsNotFound(err) {
			return fail.KubeClient(err, "listing %T", mdList)
		}
	}

	for i := range mdList.Items {
		if err := s.DynamicClient.Delete(ctx, &mdList.Items[i]); err != nil {
			md := mdList.Items[i]

			return fail.KubeClient(err, "deleting %T %s", md, dynclient.ObjectKeyFromObject(&md))
		}
	}

	// Delete all MachineSet objects
	s.Logger.Info("Deleting MachineSet objects...")
	msList := &clusterv1alpha1.MachineSetList{}
	if err := s.DynamicClient.List(ctx, msList, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {
		if !errorsutil.IsNotFound(err) {
			return fail.KubeClient(err, "getting %T", mdList)
		}
	}

	for i := range msList.Items {
		if err := s.DynamicClient.Delete(ctx, &msList.Items[i]); err != nil {
			if !errorsutil.IsNotFound(err) {
				ms := msList.Items[i]

				return fail.KubeClient(err, "deleting %T %s", ms, dynclient.ObjectKeyFromObject(&ms))
			}
		}
	}

	// Delete all Machine objects
	s.Logger.Info("Deleting Machine objects...")
	mList := &clusterv1alpha1.MachineList{}
	if err := s.DynamicClient.List(ctx, mList, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {
		if !errorsutil.IsNotFound(err) {
			return fail.KubeClient(err, "getting %T", mList)
		}
	}

	for i := range mList.Items {
		if err := s.DynamicClient.Delete(ctx, &mList.Items[i]); err != nil {
			if !errorsutil.IsNotFound(err) {
				ma := mList.Items[i]

				return fail.KubeClient(err, "deleting %T %s", ma, dynclient.ObjectKeyFromObject(&ma))
			}
		}
	}

	return nil
}
[ "func (p *Pool) Destroy() {\n\tp.tasks.Wait()\n\n\tvar wg sync.WaitGroup\n\tfor _, w := range p.Workers() {\n\t\twg.Add(1)\n\t\tgo func(w *Worker) {\n\t\t\tdefer wg.Done()\n\n\t\t\tp.destroyWorker(w)\n\t\t}(w)\n\t}\n\n\twg.Wait()\n}", "func (bc *BaseCluster) Destroy() {\n\tfor _, m := range bc.Machines() {\n\t\tbc.numMachines--\n\t\tm.Destroy()\n\t}\n}", "func (mr *MapReduce) KillWorkers() *list.List {\n l := list.New()\n for _, w := range mr.Workers {\n DPrintf(\"DoWork: shutdown %s\\n\", w.address)\n args := &ShutdownArgs{}\n var reply ShutdownReply;\n ok := call(w.address, \"Worker.Shutdown\", args, &reply)\n if ok == false {\n fmt.Printf(\"DoWork: RPC %s shutdown error\\n\", w.address)\n } else {\n l.PushBack(reply.Njobs)\n }\n }\n return l\n}", "func (c *Client) DestroyAllWorkers() {\n\tfor _, x := range c.workers {\n\t\tif x.IsRunning() {\n\t\t\tx.StopBlocking()\n\t\t}\n\t}\n\tc.workers = []*BackgroundWorker{}\n}", "func KillWorkloads(clientset kubernetes.Interface) {\n\t// Look for namespace or default to default namespace\n\tnamespace := helpers.GetEnv(\"NAMESPACE\", \"default\")\n\t// Wait Group To handle the waiting for all deletes to complete\n\tvar wg sync.WaitGroup\n\twg.Add(6)\n\t// Delete all Deployments\n\tif helpers.CheckDeleteResourceAllowed(\"deployments\") {\n\t\tgo deleteDeployments(clientset, &namespace, &wg)\n\t}\n\t// Delete all Statefulsets\n\tif helpers.CheckDeleteResourceAllowed(\"statefulsets\") {\n\t\tgo deleteStatefulsets(clientset, &namespace, &wg)\n\t}\n\t// Delete Services\n\tif helpers.CheckDeleteResourceAllowed(\"services\") {\n\t\tgo deleteServices(clientset, &namespace, &wg)\n\t}\n\t// Delete All Secrets\n\tif helpers.CheckDeleteResourceAllowed(\"secrets\") {\n\t\tgo deleteSecrets(clientset, &namespace, &wg)\n\t}\n\t// Delete All Configmaps\n\tif helpers.CheckDeleteResourceAllowed(\"configmaps\") {\n\t\tgo deleteConfigMaps(clientset, &namespace, &wg)\n\t}\n\t// Delete All Daemonsets\n\tif helpers.CheckDeleteResourceAllowed(\"daemonsets\") {\n\t\tgo deleteDaemonSets(clientset, &namespace, &wg)\n\t}\n\t// wait for processes to finish\n\twg.Wait()\n}", "func KillWorkloads(clientset kubernetes.Interface) {\n\t// Look for namespace or default to default namespace\n\tnamespace := helpers.GetEnv(\"NAMESPACE\", \"default\")\n\t// Wait Group To handle the waiting for all deletes to complete\n\tvar wg sync.WaitGroup\n\twg.Add(5)\n\t// Delete all Deployments\n\tif helpers.CheckDeleteResourceAllowed(\"deployments\") {\n\t\tgo deleteDeployments(clientset, &namespace, &wg)\n\t}\n\t// Delete all Statefulsets\n\tif helpers.CheckDeleteResourceAllowed(\"statefulsets\") {\n\t\tgo deleteStatefulsets(clientset, &namespace, &wg)\n\t}\n\t// Delete Services\n\tif helpers.CheckDeleteResourceAllowed(\"services\") {\n\t\tgo deleteServices(clientset, &namespace, &wg)\n\t}\n\t// Delete All Secrets\n\tif helpers.CheckDeleteResourceAllowed(\"secrets\") {\n\t\tgo deleteSecrets(clientset, &namespace, &wg)\n\t}\n\t// Delete All Configmaps\n\tif helpers.CheckDeleteResourceAllowed(\"configmaps\") {\n\t\tgo deleteConfigMaps(clientset, &namespace, &wg)\n\t}\n\t// wait for processes to finish\n\twg.Wait()\n}", "func (mr *MapReduce) KillWorkers() *list.List {\n\tl := list.New()\n\tfor _, w := range mr.Workers {\n\t\tDPrintf(\"DoWork: shutdown %s\\n\", w.address)\n\t\targs := &ShutdownArgs{}\n\t\tvar reply ShutdownReply\n\t\tok := call(w.address, \"Worker.Shutdown\", args, &reply)\n\t\tif ok == false {\n\t\t\tfmt.Printf(\"DoWork: RPC shutdown error\\n\")\n\t\t} else 
{\n\t\t\tl.PushBack(reply.Njobs)\n\t\t}\n\t}\n\treturn l\n}", "func (d Driver) DestroyWorker(project, branch string) error {\n\tdPtr := &d\n\treturn dPtr.remove(context.Background(), d.containerID)\n}", "func (j *JuiceFSEngine) destroyWorkers(expectedWorkers int32) (currentWorkers int32, err error) {\n\t// SchedulerMutex only for patch mode\n\tlifecycle.SchedulerMutex.Lock()\n\tdefer lifecycle.SchedulerMutex.Unlock()\n\n\truntimeInfo, err := j.getRuntimeInfo()\n\tif err != nil {\n\t\treturn currentWorkers, err\n\t}\n\n\tvar (\n\t\tnodeList = &corev1.NodeList{}\n\t\tlabelExclusiveName = utils.GetExclusiveKey()\n\t\tlabelName = runtimeInfo.GetRuntimeLabelName()\n\t\tlabelCommonName = runtimeInfo.GetCommonLabelName()\n\t\tlabelMemoryName = runtimeInfo.GetLabelNameForMemory()\n\t\tlabelDiskName = runtimeInfo.GetLabelNameForDisk()\n\t\tlabelTotalName = runtimeInfo.GetLabelNameForTotal()\n\t)\n\n\tlabelNames := []string{labelName, labelTotalName, labelDiskName, labelMemoryName, labelCommonName}\n\tj.Log.Info(\"check node labels\", \"labelNames\", labelNames)\n\n\tdatasetLabels, err := labels.Parse(fmt.Sprintf(\"%s=true\", labelCommonName))\n\tif err != nil {\n\t\treturn currentWorkers, err\n\t}\n\n\terr = j.List(context.TODO(), nodeList, &client.ListOptions{\n\t\tLabelSelector: datasetLabels,\n\t})\n\n\tif err != nil {\n\t\treturn currentWorkers, err\n\t}\n\n\tcurrentWorkers = int32(len(nodeList.Items))\n\tif expectedWorkers >= currentWorkers {\n\t\tj.Log.Info(\"No need to scale in. Skip.\")\n\t\treturn currentWorkers, nil\n\t}\n\n\tvar nodes []corev1.Node\n\tif expectedWorkers >= 0 {\n\t\tj.Log.Info(\"Scale in juicefs workers\", \"expectedWorkers\", expectedWorkers)\n\t\t// This is a scale in operation\n\t\truntimeInfo, err := j.getRuntimeInfo()\n\t\tif err != nil {\n\t\t\tj.Log.Error(err, \"getRuntimeInfo when scaling in\")\n\t\t\treturn currentWorkers, err\n\t\t}\n\n\t\tfuseGlobal, _ := runtimeInfo.GetFuseDeployMode()\n\t\tnodes, err = j.sortNodesToShutdown(nodeList.Items, fuseGlobal)\n\t\tif err != nil {\n\t\t\treturn currentWorkers, err\n\t\t}\n\n\t} else {\n\t\t// Destroy all workers. 
This is a subprocess during deletion of JuiceFSRuntime\n\t\tnodes = nodeList.Items\n\t}\n\n\t// 1.select the nodes\n\tfor _, node := range nodes {\n\t\tif expectedWorkers == currentWorkers {\n\t\t\tbreak\n\t\t}\n\n\t\tif len(node.Labels) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tnodeName := node.Name\n\t\tvar labelsToModify common.LabelsToModify\n\t\terr = retry.RetryOnConflict(retry.DefaultBackoff, func() error {\n\t\t\tnode, err := kubeclient.GetNode(j.Client, nodeName)\n\t\t\tif err != nil {\n\t\t\t\tj.Log.Error(err, \"Fail to get node\", \"nodename\", nodeName)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\ttoUpdate := node.DeepCopy()\n\t\t\tfor _, label := range labelNames {\n\t\t\t\tlabelsToModify.Delete(label)\n\t\t\t}\n\n\t\t\texclusiveLabelValue := utils.GetExclusiveValue(j.namespace, j.name)\n\t\t\tif val, exist := toUpdate.Labels[labelExclusiveName]; exist && val == exclusiveLabelValue {\n\t\t\t\tlabelsToModify.Delete(labelExclusiveName)\n\t\t\t}\n\n\t\t\terr = lifecycle.DecreaseDatasetNum(toUpdate, runtimeInfo, &labelsToModify)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Update the toUpdate in UPDATE mode\n\t\t\t// modifiedLabels, err := utils.ChangeNodeLabelWithUpdateMode(e.Client, toUpdate, labelToModify)\n\t\t\t// Update the toUpdate in PATCH mode\n\t\t\tmodifiedLabels, err := utils.ChangeNodeLabelWithPatchMode(j.Client, toUpdate, labelsToModify)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tj.Log.Info(\"Destroy worker\", \"Dataset\", j.name, \"deleted worker node\", node.Name, \"removed or updated labels\", modifiedLabels)\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn currentWorkers, err\n\t\t}\n\n\t\tcurrentWorkers--\n\t}\n\n\treturn currentWorkers, nil\n}", "func (s *Server) stopWorkers() {\n\tfor k, w := range s.workers {\n\t\tw.stop()\n\n\t\t// fix nil exception\n\t\tdelete(s.workers, k)\n\t}\n}", "func (m *ManagerImpl) StopWorkers() {\n\tm.processor.StopWorkers()\n}", "func (o *ClusterUninstaller) destroyComputeInstances() error {\n\tinstances, err := o.listComputeInstances()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, instance := range instances {\n\t\terr := o.deleteComputeInstance(instance)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (_m *IProvider) KillAllWorkerProcesses() {\n\t_m.Called()\n}", "func (s *Server) CleanupForDestroy() {\n\ts.CtxCancel()\n\ts.Events().Destroy()\n\ts.DestroyAllSinks()\n\ts.Websockets().CancelAll()\n\ts.powerLock.Destroy()\n}", "func (p *Pool) destroyWorker(w *Worker) {\n\tp.throw(EventDestruct, w, nil)\n\n\t// detaching\n\tp.muw.Lock()\n\tfor i, wc := range p.workers {\n\t\tif wc == w {\n\t\t\tp.workers = p.workers[:i+1]\n\t\t\tbreak\n\t\t}\n\t}\n\tp.muw.Unlock()\n\n\tgo w.Stop()\n\n\tselect {\n\tcase <-w.waitDone:\n\t\t// worker is dead\n\tcase <-time.NewTimer(p.cfg.DestroyTimeout).C:\n\t\t// failed to stop process\n\t\tif err := w.Kill(); err != nil {\n\t\t\tp.throw(EventError, w, err)\n\t\t}\n\t}\n}", "func (manager *syncerManager) garbageCollectSyncer() {\n\tmanager.mu.Lock()\n\tdefer manager.mu.Unlock()\n\tfor key, syncer := range manager.syncerMap {\n\t\tif syncer.IsStopped() && !syncer.IsShuttingDown() {\n\t\t\tdelete(manager.syncerMap, key)\n\t\t}\n\t}\n}", "func (m *McmManager) DeleteMachines(machines []*Ref) error {\n\n\tvar (\n\t\tmdclone *v1alpha1.MachineDeployment\n\t\tterminatingMachines []*v1alpha1.Machine\n\t)\n\n\tif len(machines) == 0 {\n\t\treturn nil\n\t}\n\tcommonMachineDeployment, err := m.GetMachineDeploymentForMachine(machines[0])\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\n\tfor _, machine := range machines {\n\t\tmachinedeployment, err := m.GetMachineDeploymentForMachine(machine)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif machinedeployment.Name != commonMachineDeployment.Name {\n\t\t\treturn fmt.Errorf(\"Cannot delete machines which don't belong to the same MachineDeployment\")\n\t\t}\n\t}\n\n\tfor _, machine := range machines {\n\n\t\tretryDeadline := time.Now().Add(maxRetryDeadline)\n\t\tfor {\n\t\t\tmachine, err := m.machineLister.Machines(m.namespace).Get(machine.Name)\n\t\t\tif err != nil && time.Now().Before(retryDeadline) {\n\t\t\t\tklog.Warningf(\"Unable to fetch Machine object %s, Error: %s\", machine.Name, err)\n\t\t\t\ttime.Sleep(conflictRetryInterval)\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\t// Timeout occurred\n\t\t\t\tklog.Errorf(\"Unable to fetch Machine object %s, Error: %s\", machine.Name, err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tmclone := machine.DeepCopy()\n\n\t\t\tif isMachineTerminating(mclone) {\n\t\t\t\tterminatingMachines = append(terminatingMachines, mclone)\n\t\t\t}\n\t\t\tif mclone.Annotations != nil {\n\t\t\t\tif mclone.Annotations[machinePriorityAnnotation] == \"1\" {\n\t\t\t\t\tklog.Infof(\"Machine %q priority is already set to 1, hence skipping the update\", machine.Name)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmclone.Annotations[machinePriorityAnnotation] = \"1\"\n\t\t\t} else {\n\t\t\t\tmclone.Annotations = make(map[string]string)\n\t\t\t\tmclone.Annotations[machinePriorityAnnotation] = \"1\"\n\t\t\t}\n\n\t\t\t_, err = m.machineClient.Machines(machine.Namespace).Update(context.TODO(), mclone, metav1.UpdateOptions{})\n\t\t\tif err != nil && time.Now().Before(retryDeadline) {\n\t\t\t\tklog.Warningf(\"Unable to update Machine object %s, Error: %s\", machine.Name, err)\n\t\t\t\ttime.Sleep(conflictRetryInterval)\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\t// Timeout occurred\n\t\t\t\tklog.Errorf(\"Unable to update Machine object %s, Error: %s\", machine.Name, err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Break out of loop when update succeeds\n\t\t\tbreak\n\t\t}\n\t}\n\n\tretryDeadline := time.Now().Add(maxRetryDeadline)\n\tfor {\n\t\tmd, err := m.machineDeploymentLister.MachineDeployments(m.namespace).Get(commonMachineDeployment.Name)\n\t\tif err != nil && time.Now().Before(retryDeadline) {\n\t\t\tklog.Warningf(\"Unable to fetch MachineDeployment object %s, Error: %s\", commonMachineDeployment.Name, err)\n\t\t\ttime.Sleep(conflictRetryInterval)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\t// Timeout occurred\n\t\t\tklog.Errorf(\"Unable to fetch MachineDeployment object %s, Error: %s\", commonMachineDeployment.Name, err)\n\t\t\treturn err\n\t\t}\n\n\t\tmdclone = md.DeepCopy()\n\t\tif (int(mdclone.Spec.Replicas) - len(machines)) < 0 {\n\t\t\treturn fmt.Errorf(\"Unable to delete machine in MachineDeployment object %s , machine replicas are < 0 \", commonMachineDeployment.Name)\n\t\t}\n\t\texpectedReplicas := mdclone.Spec.Replicas - int32(len(machines)) + int32(len(terminatingMachines))\n\t\tif expectedReplicas == mdclone.Spec.Replicas {\n\t\t\tklog.Infof(\"MachineDeployment %q is already set to %d, skipping the update\", mdclone.Name, expectedReplicas)\n\t\t\tbreak\n\t\t}\n\n\t\tmdclone.Spec.Replicas = expectedReplicas\n\n\t\t_, err = m.machineClient.MachineDeployments(mdclone.Namespace).Update(context.TODO(), mdclone, metav1.UpdateOptions{})\n\t\tif err != nil && time.Now().Before(retryDeadline) {\n\t\t\tklog.Warningf(\"Unable to update MachineDeployment 
object %s, Error: %s\", commonMachineDeployment.Name, err)\n\t\t\ttime.Sleep(conflictRetryInterval)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\t// Timeout occurred\n\t\t\tklog.Errorf(\"Unable to update MachineDeployment object %s, Error: %s\", commonMachineDeployment.Name, err)\n\t\t\treturn err\n\t\t}\n\n\t\t// Break out of loop when update succeeds\n\t\tbreak\n\t}\n\n\tklog.V(2).Infof(\"MachineDeployment %s size decreased to %d\", commonMachineDeployment.Name, mdclone.Spec.Replicas)\n\n\treturn nil\n}", "func (bq *InMemoryBuildQueue) TerminateWorkers(ctx context.Context, request *buildqueuestate.TerminateWorkersRequest) (*emptypb.Empty, error) {\n\tvar completionWakeups []chan struct{}\n\tbq.enter(bq.clock.Now())\n\tfor _, scq := range bq.sizeClassQueues {\n\t\tfor workerKey, w := range scq.workers {\n\t\t\tif workerMatchesPattern(workerKey.getWorkerID(), request.WorkerIdPattern) {\n\t\t\t\tw.terminating = true\n\t\t\t\tif t := w.currentTask; t != nil {\n\t\t\t\t\t// The task will be at the\n\t\t\t\t\t// EXECUTING stage, so it can\n\t\t\t\t\t// only transition to COMPLETED.\n\t\t\t\t\tcompletionWakeups = append(completionWakeups, t.stageChangeWakeup)\n\t\t\t\t} else if w.wakeup != nil {\n\t\t\t\t\t// Wake up the worker, so that\n\t\t\t\t\t// it's dequeued. This prevents\n\t\t\t\t\t// additional tasks to be\n\t\t\t\t\t// assigned to it.\n\t\t\t\t\tw.wakeUp(scq)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tbq.leave()\n\n\tfor _, completionWakeup := range completionWakeups {\n\t\tselect {\n\t\tcase <-completionWakeup:\n\t\t\t// Worker has become idle.\n\t\tcase <-ctx.Done():\n\t\t\t// Client has canceled the request.\n\t\t\treturn nil, util.StatusFromContext(ctx)\n\t\t}\n\t}\n\treturn &emptypb.Empty{}, nil\n}", "func (pm *Manager) cleanReservedPortsWorker() {\n\tfor {\n\t\ttime.Sleep(CleanReservedPortsInterval)\n\t\tpm.mu.Lock()\n\t\tfor name, ctx := range pm.reservedPorts {\n\t\t\tif ctx.Closed && time.Since(ctx.UpdateTime) > MaxPortReservedDuration {\n\t\t\t\tdelete(pm.reservedPorts, name)\n\t\t\t}\n\t\t}\n\t\tpm.mu.Unlock()\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WaitDestroy waits for all Machines to be deleted
func WaitDestroy(s *state.State) error {
	s.Logger.Info("Waiting for all machines to get deleted...")

	return wait.PollUntilContextTimeout(s.Context, 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
		list := &clusterv1alpha1.MachineList{}
		if err := s.DynamicClient.List(ctx, list, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {
			return false, fail.KubeClient(err, "getting %T", list)
		}

		if len(list.Items) != 0 {
			return false, nil
		}

		return true, nil
	})
}
[ "func (bc *BaseCluster) Destroy() {\n\tfor _, m := range bc.Machines() {\n\t\tbc.numMachines--\n\t\tm.Destroy()\n\t}\n}", "func WaitForCleanup() error {\n\tctx := context.Background()\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar (\n\t\tinterval = time.NewTicker(1 * time.Second)\n\t\ttimeout = time.NewTimer(1 * time.Minute)\n\t)\n\n\tfor range interval.C {\n\t\tselect {\n\t\tcase <-timeout.C:\n\t\t\treturn errors.New(\"timed out waiting for all easycontainers containers to get removed\")\n\t\tdefault:\n\t\t\t// only grab the containers created by easycontainers\n\t\t\targs := filters.NewArgs()\n\t\t\targs.Add(\"name\", \"/\"+prefix)\n\n\t\t\tcontainers, err := cli.ContainerList(ctx, types.ContainerListOptions{\n\t\t\t\tFilters: args,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(containers) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *Machine) WaitForPVCsDelete(namespace string) error {\n\treturn wait.PollImmediate(m.PollInterval, m.PollTimeout, func() (bool, error) {\n\t\treturn m.PVCsDeleted(namespace)\n\t})\n}", "func DestroyWorkers(s *state.State) error {\n\tif !s.Cluster.MachineController.Deploy {\n\t\ts.Logger.Info(\"Skipping deleting workers because machine-controller is disabled in configuration.\")\n\n\t\treturn nil\n\t}\n\tif s.DynamicClient == nil {\n\t\treturn fail.NoKubeClient()\n\t}\n\n\tctx := context.Background()\n\n\t// Annotate nodes with kubermatic.io/skip-eviction=true to skip eviction\n\ts.Logger.Info(\"Annotating nodes to skip eviction...\")\n\tnodes := &corev1.NodeList{}\n\tif err := s.DynamicClient.List(ctx, nodes); err != nil {\n\t\treturn fail.KubeClient(err, \"listing Nodes\")\n\t}\n\n\tfor _, node := range nodes.Items {\n\t\tnodeKey := dynclient.ObjectKey{Name: node.Name}\n\n\t\tretErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {\n\t\t\tn := corev1.Node{}\n\t\t\tif err := s.DynamicClient.Get(ctx, nodeKey, &n); err != nil {\n\t\t\t\treturn fail.KubeClient(err, \"getting %T %s\", n, nodeKey)\n\t\t\t}\n\n\t\t\tif n.Annotations == nil {\n\t\t\t\tn.Annotations = map[string]string{}\n\t\t\t}\n\t\t\tn.Annotations[\"kubermatic.io/skip-eviction\"] = \"true\"\n\n\t\t\treturn fail.KubeClient(s.DynamicClient.Update(ctx, &n), \"updating %T %s\", n, nodeKey)\n\t\t})\n\n\t\tif retErr != nil {\n\t\t\treturn retErr\n\t\t}\n\t}\n\n\t// Delete all MachineDeployment objects\n\ts.Logger.Info(\"Deleting MachineDeployment objects...\")\n\tmdList := &clusterv1alpha1.MachineDeploymentList{}\n\tif err := s.DynamicClient.List(ctx, mdList, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {\n\t\tif !errorsutil.IsNotFound(err) {\n\t\t\treturn fail.KubeClient(err, \"listing %T\", mdList)\n\t\t}\n\t}\n\n\tfor i := range mdList.Items {\n\t\tif err := s.DynamicClient.Delete(ctx, &mdList.Items[i]); err != nil {\n\t\t\tmd := mdList.Items[i]\n\n\t\t\treturn fail.KubeClient(err, \"deleting %T %s\", md, dynclient.ObjectKeyFromObject(&md))\n\t\t}\n\t}\n\n\t// Delete all MachineSet objects\n\ts.Logger.Info(\"Deleting MachineSet objects...\")\n\tmsList := &clusterv1alpha1.MachineSetList{}\n\tif err := s.DynamicClient.List(ctx, msList, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {\n\t\tif !errorsutil.IsNotFound(err) {\n\t\t\treturn fail.KubeClient(err, \"getting %T\", mdList)\n\t\t}\n\t}\n\n\tfor i := range msList.Items {\n\t\tif err := s.DynamicClient.Delete(ctx, &msList.Items[i]); err != nil {\n\t\t\tif 
!errorsutil.IsNotFound(err) {\n\t\t\t\tms := msList.Items[i]\n\n\t\t\t\treturn fail.KubeClient(err, \"deleting %T %s\", ms, dynclient.ObjectKeyFromObject(&ms))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Delete all Machine objects\n\ts.Logger.Info(\"Deleting Machine objects...\")\n\tmList := &clusterv1alpha1.MachineList{}\n\tif err := s.DynamicClient.List(ctx, mList, dynclient.InNamespace(resources.MachineControllerNameSpace)); err != nil {\n\t\tif !errorsutil.IsNotFound(err) {\n\t\t\treturn fail.KubeClient(err, \"getting %T\", mList)\n\t\t}\n\t}\n\n\tfor i := range mList.Items {\n\t\tif err := s.DynamicClient.Delete(ctx, &mList.Items[i]); err != nil {\n\t\t\tif !errorsutil.IsNotFound(err) {\n\t\t\t\tma := mList.Items[i]\n\n\t\t\t\treturn fail.KubeClient(err, \"deleting %T %s\", ma, dynclient.ObjectKeyFromObject(&ma))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func (client *Client) Destroy() error {\n\n\tconf, err := client.configClient.Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttfInputVars := client.tfInputVarsFactory.NewInputVars(conf)\n\n\tvar volumesToDelete []string\n\n\tswitch client.provider.IAAS() {\n\n\tcase iaas.AWS:\n\t\ttfOutputs, err1 := client.tfCLI.BuildOutput(tfInputVars)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tvpcID, err2 := tfOutputs.Get(\"VPCID\")\n\t\tif err2 != nil {\n\t\t\treturn err2\n\t\t}\n\t\tvolumesToDelete, err1 = client.provider.DeleteVMsInVPC(vpcID)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\n\tcase iaas.GCP:\n\t\tproject, err1 := client.provider.Attr(\"project\")\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tzone := client.provider.Zone(\"\", \"\")\n\t\terr1 = client.provider.DeleteVMsInDeployment(zone, project, conf.GetDeployment())\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t}\n\n\terr = client.tfCLI.Destroy(tfInputVars)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif client.provider.IAAS() == iaas.AWS {\n\t\tif len(volumesToDelete) > 0 {\n\t\t\tfmt.Printf(\"Scheduling to delete %v volumes\\n\", len(volumesToDelete))\n\t\t}\n\t\tif err1 := client.provider.DeleteVolumes(volumesToDelete, iaas.DeleteVolume); err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t}\n\n\tif err = client.configClient.DeleteAll(conf); err != nil {\n\t\treturn err\n\t}\n\n\treturn writeDestroySuccessMessage(client.stdout)\n}", "func (p *Pool) Destroy() {\n\tp.tasks.Wait()\n\n\tvar wg sync.WaitGroup\n\tfor _, w := range p.Workers() {\n\t\twg.Add(1)\n\t\tgo func(w *Worker) {\n\t\t\tdefer wg.Done()\n\n\t\t\tp.destroyWorker(w)\n\t\t}(w)\n\t}\n\n\twg.Wait()\n}", "func poolWaitDeleted(virConn *libvirt.Libvirt, uuid string) error {\n\tlog.Printf(\"Waiting for pool %s to be deleted...\", uuid)\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{poolExistsID},\n\t\tTarget: []string{poolNotExistsID},\n\t\tRefresh: poolExists(virConn, uuid),\n\t\tTimeout: 1 * time.Minute,\n\t\tDelay: 5 * time.Second,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\tlog.Printf(\"%s\", err)\n\t\treturn fmt.Errorf(\"unexpected error during pool destroy operation. 
The pool was not deleted\")\n\t}\n\treturn nil\n}", "func WaitForZKClusterToTerminate(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) error {\n\tlog.Printf(\"waiting for zookeeper cluster to terminate: %s\", z.Name)\n\n\tlistOptions := []client.ListOption{\n\t\tclient.InNamespace(z.GetNamespace()),\n\t\tclient.MatchingLabelsSelector{Selector: labels.SelectorFromSet(map[string]string{\"app\": z.GetName()})},\n\t}\n\n\t// Wait for Pods to terminate\n\terr := wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpodList := corev1.PodList{}\n\t\terr = k8client.List(goctx.TODO(), &podList, listOptions...)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range podList.Items {\n\t\t\tpod := &podList.Items[i]\n\t\t\tnames = append(names, pod.Name)\n\t\t}\n\t\tlog.Printf(\"waiting for pods to terminate, running pods (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for PVCs to terminate\n\terr = wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpvcList := corev1.PersistentVolumeClaimList{}\n\t\terr = k8client.List(goctx.TODO(), &pvcList, listOptions...)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range pvcList.Items {\n\t\t\tpvc := &pvcList.Items[i]\n\t\t\tnames = append(names, pvc.Name)\n\t\t}\n\t\tlog.Printf(\"waiting for pvc to terminate (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"zookeeper cluster terminated: %s\", z.Name)\n\treturn nil\n}", "func WaitForClusterToTerminate(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) error {\n\tt.Logf(\"waiting for zookeeper cluster to terminate: %s\", z.Name)\n\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\"app\": z.GetName()}).String(),\n\t}\n\n\t// Wait for Pods to terminate\n\terr := wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpodList, err := f.KubeClient.CoreV1().Pods(z.Namespace).List(goctx.TODO(), listOptions)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range podList.Items {\n\t\t\tpod := &podList.Items[i]\n\t\t\tnames = append(names, pod.Name)\n\t\t}\n\t\tt.Logf(\"waiting for pods to terminate, running pods (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for PVCs to terminate\n\terr = wait.Poll(RetryInterval, TerminateTimeout, func() (done bool, err error) {\n\t\tpvcList, err := f.KubeClient.CoreV1().PersistentVolumeClaims(z.Namespace).List(goctx.TODO(), listOptions)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tvar names []string\n\t\tfor i := range pvcList.Items {\n\t\t\tpvc := &pvcList.Items[i]\n\t\t\tnames = append(names, pvc.Name)\n\t\t}\n\t\tt.Logf(\"waiting for pvc to terminate (%v)\", names)\n\t\tif len(names) != 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.Logf(\"zookeeper cluster terminated: %s\", z.Name)\n\treturn nil\n}", "func (v VirtualMachine) Delete() error {\n\ttctx, tcancel := context.WithTimeout(context.Background(), time.Minute*5)\n\tdefer tcancel()\n\n\tpowerState, err := 
v.vm.PowerState(context.Background())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Getting power state: %s\", powerState)\n\t}\n\n\tif powerState == \"poweredOn\" || powerState == \"suspended\" {\n\t\tpowerOff, err := v.vm.PowerOff(context.Background())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Shutting down virtual machine: %s\", err)\n\t\t}\n\n\t\terr = powerOff.Wait(tctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Waiting for machine to shut down: %s\", err)\n\t\t}\n\t}\n\n\tdestroy, err := v.vm.Destroy(context.Background())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Destroying virtual machine: %s\", err)\n\t}\n\n\terr = destroy.Wait(tctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Waiting for machine to destroy: %s\", err)\n\t}\n\n\treturn nil\n}", "func assertDeletedWaitForCleanup(t *testing.T, cl client.Client, thing client.Object) {\n\tt.Helper()\n\tthingName := types.NamespacedName{\n\t\tName: thing.GetName(),\n\t\tNamespace: thing.GetNamespace(),\n\t}\n\tassertDeleted(t, cl, thing)\n\tif err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {\n\t\tif err := cl.Get(context.TODO(), thingName, thing); err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}); err != nil {\n\t\tt.Fatalf(\"Timed out waiting for %s to be cleaned up: %v\", thing.GetName(), err)\n\t}\n}", "func (c *swimCluster) Destroy() {\n\tfor _, node := range c.nodes {\n\t\tnode.Destroy()\n\t}\n\tfor _, ch := range c.channels {\n\t\tch.Close()\n\t}\n}", "func (t *TestSpec) Destroy(delay time.Duration, base string) error {\n\tmanifestToDestroy := []string{\n\t\tt.GetManifestsPath(base),\n\t\tfmt.Sprintf(\"%s/%s\", base, t.NetworkPolicyName()),\n\t\tt.Destination.GetManifestPath(t, base),\n\t}\n\n\tdone := time.After(delay)\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tfor _, manifest := range manifestToDestroy {\n\t\t\t\tt.Kub.Delete(manifest)\n\t\t\t}\n\t\t}\n\t}\n}", "func (bf *BaseFlight) Destroy() {\n\tfor _, c := range bf.Clusters() {\n\t\tc.Destroy()\n\t}\n\n\tif err := bf.agent.Close(); err != nil {\n\t\tplog.Errorf(\"Error closing agent: %v\", err)\n\t}\n}", "func (m *Machine) Delete() {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tm.manager.Delete()\n\n\tif m.backoffTimer != nil {\n\t\tm.backoffTimer.Stop()\n\t}\n\n\tif m.cancel != nil {\n\t\tm.Infof(\"runner\", \"Stopping\")\n\t\tm.cancel()\n\t}\n\n\tm.startTime = time.Time{}\n}", "func (m *Manager) RemoveAllAndWait() {\n\tctrls := m.removeAll()\n\tfor _, ctrl := range ctrls {\n\t\t<-ctrl.terminated\n\t}\n}", "func (o *ClusterUninstaller) destroyCloudInstances() error {\n\tfirstPassList, err := o.listCloudInstances()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(firstPassList.list()) == 0 {\n\t\treturn nil\n\t}\n\n\titems := o.insertPendingItems(cloudInstanceTypeName, firstPassList.list())\n\tctx, cancel := o.contextWithTimeout()\n\tdefer cancel()\n\tfor _, item := range items {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\to.Logger.Debugf(\"destroyCloudInstances: case <-ctx.Done()\")\n\t\t\treturn o.Context.Err() // we're cancelled, abort\n\t\tdefault:\n\t\t}\n\n\t\tbackoff := wait.Backoff{\n\t\t\tDuration: 15 * time.Second,\n\t\t\tFactor: 1.1,\n\t\t\tCap: leftInContext(ctx),\n\t\t\tSteps: math.MaxInt32}\n\t\terr = wait.ExponentialBackoffWithContext(ctx, backoff, func(context.Context) (bool, error) {\n\t\t\terr2 := o.destroyCloudInstance(item)\n\t\t\tif err2 == nil {\n\t\t\t\treturn true, 
err2\n\t\t\t}\n\t\t\to.errorTracker.suppressWarning(item.key, err2, o.Logger)\n\t\t\treturn false, err2\n\t\t})\n\t\tif err != nil {\n\t\t\to.Logger.Fatal(\"destroyCloudInstances: ExponentialBackoffWithContext (destroy) returns \", err)\n\t\t}\n\t}\n\n\tif items = o.getPendingItems(cloudInstanceTypeName); len(items) > 0 {\n\t\tfor _, item := range items {\n\t\t\to.Logger.Debugf(\"destroyCloudInstances: found %s in pending items\", item.name)\n\t\t}\n\t\treturn errors.Errorf(\"destroyCloudInstances: %d undeleted items pending\", len(items))\n\t}\n\n\tbackoff := wait.Backoff{\n\t\tDuration: 15 * time.Second,\n\t\tFactor: 1.1,\n\t\tCap: leftInContext(ctx),\n\t\tSteps: math.MaxInt32}\n\terr = wait.ExponentialBackoffWithContext(ctx, backoff, func(context.Context) (bool, error) {\n\t\tsecondPassList, err2 := o.listCloudInstances()\n\t\tif err2 != nil {\n\t\t\treturn false, err2\n\t\t}\n\t\tif len(secondPassList) == 0 {\n\t\t\t// We finally don't see any remaining instances!\n\t\t\treturn true, nil\n\t\t}\n\t\tfor _, item := range secondPassList {\n\t\t\to.Logger.Debugf(\"destroyCloudInstances: found %s in second pass\", item.name)\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\to.Logger.Fatal(\"destroyCloudInstances: ExponentialBackoffWithContext (list) returns \", err)\n\t}\n\n\treturn nil\n}", "func (s *Server) CleanupForDestroy() {\n\ts.CtxCancel()\n\ts.Events().Destroy()\n\ts.DestroyAllSinks()\n\ts.Websockets().CancelAll()\n\ts.powerLock.Destroy()\n}", "func cleanDiscoveryPool() {\n\tfor {\n\t\tdiscoveryStorage.Clean()\n\t\ttime.Sleep(time.Duration(config.CleanEvery) * time.Second)\n\t}\n\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
waitForMachineController waits for machine-controller to become running
func waitForMachineController(ctx context.Context, client dynclient.Client) error {
	condFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{
		Namespace: resources.MachineControllerNameSpace,
		LabelSelector: labels.SelectorFromSet(map[string]string{
			appLabelKey: resources.MachineControllerName,
		}),
	})

	return fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), "waiting for machine-controller to become ready")
}
[ "func waitForMachineState(api *cloudapi.Client, id, state string, timeout time.Duration) error {\n\treturn waitFor(\n\t\tfunc() (bool, error) {\n\t\t\tcurrentState, err := readMachineState(api, id)\n\t\t\treturn currentState == state, err\n\t\t},\n\t\tmachineStateChangeCheckInterval,\n\t\tmachineStateChangeTimeout,\n\t)\n}", "func WaitReady(ctx *util.Context) error {\n\tif !ctx.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\tctx.Logger.Infoln(\"Waiting for machine-controller to come up…\")\n\n\t// Wait a bit to let scheduler to react\n\ttime.Sleep(10 * time.Second)\n\n\tif err := WaitForWebhook(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller-webhook did not come up\")\n\t}\n\n\tif err := WaitForMachineController(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller did not come up\")\n\t}\n\treturn nil\n}", "func waitForWebhook(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerWebhookName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller webhook to became ready\")\n}", "func (s *FakeJujuService) startMachine(machine *state.Machine) error {\n\n\tlog.Infof(\"Starting machine %s\", machine.Id())\n\n\tnow := time.Now()\n\n\t// Set network address\n\taddress := network.NewScopedAddress(\"127.0.0.1\", network.ScopeCloudLocal)\n\tif err := machine.SetProviderAddresses(address); err != nil {\n\t\treturn err\n\t}\n\n\t// Set instance state\n\tif err := machine.SetProvisioned(s.newInstanceId(), \"nonce\", nil); err != nil {\n\t\treturn err\n\t}\n\tif err := machine.SetInstanceStatus(status.StatusInfo{\n\t\tStatus: status.Running,\n\t\tMessage: \"\",\n\t\tSince: &now,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t// Set agent version\n\tcurrentVersion := version.Current.String()\n\tagentVersion, err := semversion.ParseBinary(currentVersion + \"-xenial-amd64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := machine.SetAgentVersion(agentVersion); err != nil {\n\t\treturn err\n\t}\n\n\t// Set agent status\n\tif err := machine.SetStatus(status.StatusInfo{\n\t\tStatus: status.Started,\n\t\tMessage: \"\",\n\t\tSince: &now,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t// Set agent presence\n\tif _, err := machine.SetAgentPresence(); err != nil {\n\t\treturn err\n\t}\n\ts.state.StartSync()\n\tif err := machine.WaitAgentPresence(MediumWait); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func WaitReady(s *state.State) error {\n\tif !s.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\ts.Logger.Infoln(\"Waiting for machine-controller to come up...\")\n\n\tif err := cleanupStaleResources(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\tif err := waitForWebhook(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\tif err := waitForMachineController(s.Context, s.DynamicClient); err != nil {\n\t\treturn err\n\t}\n\n\treturn waitForCRDs(s)\n}", "func waitForMachineSetToExist(capiClient capiclientset.Interface, namespace, name string) error {\n\treturn waitForMachineSetStatus(\n\t\tcapiClient,\n\t\tnamespace, name,\n\t\tfunc(machineSet *capiv1alpha1.MachineSet) bool { return machineSet != nil },\n\t)\n}", "func 
waitForKdc() {\n\tfor {\n\t\tresp, err := bash.Run(\"check\", []string{keyDir})\n\t\tif err != nil || resp != 0 {\n\t\t\tif resp != 0 {\n\t\t\t\tfmt.Println(\"KDC is not yet available. Shell return code is \" + strconv.Itoa(resp))\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"KDC is not yet available \" + err.Error())\n\t\t\t}\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n}", "func TestWaitUntilRunning(t *testing.T) {\n\tts := memorytopo.NewServer(\"cell1\")\n\tm := NewManager(ts)\n\n\t// Start it 3 times i.e. restart it 2 times.\n\tfor i := 1; i <= 3; i++ {\n\t\t// Run the manager in the background.\n\t\twg, _, cancel := StartManager(m)\n\n\t\t// Shut it down and wait for the shutdown to complete.\n\t\tcancel()\n\t\twg.Wait()\n\t}\n}", "func waitForCP(restConfig *rest.Config, clustername string) (bool, error) {\n\tlog.Info(\"Waiting for the Control Plane to appear\")\n\t// Set the vars we need\n\tcpname := clustername + \"-control-plane\"\n\tvar expectedCPReplicas int32 = 3\n\n\t// We need to load the scheme since it's not part of the core API\n\tscheme := runtime.NewScheme()\n\t_ = kcpv1.AddToScheme(scheme)\n\n\tc, err := client.New(restConfig, client.Options{\n\t\tScheme: scheme,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// wait up until 20 minutes\n\tcounter := 0\n\tfor runs := 20; counter <= runs; counter++ {\n\t\tif counter > runs {\n\t\t\treturn false, errors.New(\"control-plane did not come up after 10 minutes\")\n\t\t}\n\t\t// get the current status, wait for 3 CP nodes\n\t\tkcp := &kcpv1.KubeadmControlPlane{}\n\t\tif err := c.Get(context.TODO(), client.ObjectKey{Namespace: \"default\", Name: cpname}, kcp); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif kcp.Status.Replicas == expectedCPReplicas {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(time.Minute)\n\n\t}\n\n\treturn true, nil\n}", "func CheckMachine(machine string) error {\n\tbars := make([]*pb.ProgressBar, 0)\n\tvar wg sync.WaitGroup\n\tvar path = getPath()\n\tvar machinePath = filepath.Join(path, machine, machine+\".vbox\")\n\n\tfmt.Println(\"[+] Checking virtual machine\")\n\t// checking file location\n\tif !fileExists(machinePath) {\n\t\trepository, err := repo.NewRepositoryVM()\n\n\t\t// checking local repository\n\t\tif repository.GetURL() == \"\" {\n\t\t\treturn errors.New(\"URL is not set for downloading VBox image\")\n\t\t}\n\n\t\tdst := filepath.Join(repository.Dir(), repository.GetVersion())\n\t\tfileName := repository.Name()\n\n\t\t// download virtual machine\n\t\tif !fileExists(filepath.Join(dst, fileName)) {\n\t\t\tfmt.Println(\"[+] Starting virtual machine download\")\n\t\t\tvar bar1 *pb.ProgressBar\n\t\t\tvar err error\n\n\t\t\tfileName, bar1, err = repo.DownloadAsync(repository, &wg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbar1.Prefix(fmt.Sprintf(\"[+] Download %-15s\", fileName))\n\t\t\tif bar1.Total > 0 {\n\t\t\t\tbars = append(bars, bar1)\n\t\t\t}\n\t\t\tpool, err := pb.StartPool(bars...)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tpool.Stop()\n\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t}\n\n\t\t// unzip virtual machine\n\t\terr = help.Unzip(filepath.Join(dst, fileName), path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\tif !isActive(machinePath) {\n\t\tfmt.Printf(\"[+] Registering %s\\n\", machine)\n\t\t_, err := help.ExecCmd(\"VBoxManage\",\n\t\t\t[]string{\n\t\t\t\t\"registervm\",\n\t\t\t\tfmt.Sprintf(\"%s\", machinePath),\n\t\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"[+] 
Done\")\n\t}\n\treturn nil\n}", "func (b *Botanist) WaitForControllersToBeActive(ctx context.Context) error {\n\ttype controllerInfo struct {\n\t\tname string\n\t\tlabels map[string]string\n\t}\n\n\ttype checkOutput struct {\n\t\tcontrollerName string\n\t\tready bool\n\t\terr error\n\t}\n\n\tvar (\n\t\tcontrollers = []controllerInfo{}\n\t\tpollInterval = 5 * time.Second\n\t)\n\n\t// Check whether the kube-controller-manager deployment exists\n\tif err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeControllerManager), &appsv1.Deployment{}); err == nil {\n\t\tcontrollers = append(controllers, controllerInfo{\n\t\t\tname: v1beta1constants.DeploymentNameKubeControllerManager,\n\t\t\tlabels: map[string]string{\n\t\t\t\t\"app\": \"kubernetes\",\n\t\t\t\t\"role\": \"controller-manager\",\n\t\t\t},\n\t\t})\n\t} else if client.IgnoreNotFound(err) != nil {\n\t\treturn err\n\t}\n\n\treturn retry.UntilTimeout(context.TODO(), pollInterval, 90*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tvar (\n\t\t\twg sync.WaitGroup\n\t\t\tout = make(chan *checkOutput)\n\t\t)\n\n\t\tfor _, controller := range controllers {\n\t\t\twg.Add(1)\n\n\t\t\tgo func(controller controllerInfo) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tpodList := &corev1.PodList{}\n\t\t\t\terr := b.K8sSeedClient.Client().List(ctx, podList,\n\t\t\t\t\tclient.InNamespace(b.Shoot.SeedNamespace),\n\t\t\t\t\tclient.MatchingLabels(controller.labels))\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check that only one replica of the controller exists.\n\t\t\t\tif len(podList.Items) != 1 {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for %s to have exactly one replica\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Check that the existing replica is not in getting deleted.\n\t\t\t\tif podList.Items[0].DeletionTimestamp != nil {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for a new replica of %s\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check if the controller is active by reading its leader election record.\n\t\t\t\tleaderElectionRecord, err := common.ReadLeaderElectionRecord(b.K8sShootClient, resourcelock.EndpointsResourceLock, metav1.NamespaceSystem, controller.name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif delta := metav1.Now().UTC().Sub(leaderElectionRecord.RenewTime.Time.UTC()); delta <= pollInterval-time.Second {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, ready: true}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tb.Logger.Infof(\"Waiting for %s to be active\", controller.name)\n\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t}(controller)\n\t\t}\n\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(out)\n\t\t}()\n\n\t\tfor result := range out {\n\t\t\tif result.err != nil {\n\t\t\t\treturn retry.SevereError(fmt.Errorf(\"could not check whether controller %s is active: %+v\", result.controllerName, result.err))\n\t\t\t}\n\t\t\tif !result.ready {\n\t\t\t\treturn retry.MinorError(fmt.Errorf(\"controller %s is not active\", result.controllerName))\n\t\t\t}\n\t\t}\n\n\t\treturn retry.Ok()\n\t})\n}", "func (c *Client) ProvisionMachineWait(ipAddress string) (result *string, err error) {\n\n\tuptimeMap := 
uptimeCommand(ipAddress)\n\n\t// Marshall the parlay submission (runs the uptime command)\n\tb, err := json.Marshal(uptimeMap)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Create the string that will be used to get the logs\n\tdashAddress := strings.Replace(ipAddress, \".\", \"-\", -1)\n\n\t// Get the time\n\tt := time.Now()\n\t//r.Recorder.Eventf(plunderMachine, corev1.EventTypeNormal, \"PlunderProvision\", \"Plunder has begun provisioning the Operating System\")\n\n\tfor {\n\t\t// Set Parlay API path and POST\n\t\tep, resp := apiserver.FindFunctionEndpoint(c.address, c.server, \"parlay\", http.MethodPost)\n\t\tif resp.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\t\tc.address.Path = ep.Path\n\n\t\tresponse, err := apiserver.ParsePlunderPost(c.address, c.server, b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If an error has been returned then handle the error gracefully and terminate\n\t\tif response.FriendlyError != \"\" || response.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\n\t\t// Sleep for five seconds\n\t\ttime.Sleep(5 * time.Second)\n\n\t\t// Set the parlay API get logs path and GET\n\t\tep, resp = apiserver.FindFunctionEndpoint(c.address, c.server, \"parlayLog\", http.MethodGet)\n\t\tif resp.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\t\tc.address.Path = ep.Path + \"/\" + dashAddress\n\n\t\tresponse, err = apiserver.ParsePlunderGet(c.address, c.server)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// If an error has been returned then handle the error gracefully and terminate\n\t\tif response.FriendlyError != \"\" || response.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\n\t\tvar logs plunderlogging.JSONLog\n\n\t\terr = json.Unmarshal(response.Payload, &logs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif logs.State == \"Completed\" {\n\t\t\tprovisioningResult := fmt.Sprintf(\"Host has been succesfully provisioned OS in %s Seconds\\n\", time.Since(t).Round(time.Second))\n\t\t\t//r.Recorder.Eventf(plunderMachine, corev1.EventTypeNormal, \"PlunderProvision\", provisioningResult)\n\n\t\t\treturn &provisioningResult, nil\n\t\t}\n\t}\n\t//return nil, fmt.Errorf(\"TODO - this should never happen\")\n}", "func waitForInstanceState(\n\tdesiredState string, instanceID string, client *civogo.Client, timeout time.Duration) error {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tresult := make(chan error, 1)\n\tgo func() {\n\t\tattempts := 0\n\t\tfor {\n\t\t\tattempts++\n\n\t\t\tlog.Printf(\"Checking instance status... 
(attempt: %d)\", attempts)\n\t\t\tinstance, err := client.GetInstance(instanceID)\n\t\t\tif err != nil {\n\t\t\t\tresult <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif instance.Status == desiredState {\n\t\t\t\tresult <- nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Wait 3 seconds in between\n\t\t\ttime.Sleep(3 * time.Second)\n\n\t\t\t// Verify we shouldn't exit\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\t// We finished, so just exit the goroutine\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t// Keep going\n\t\t\t}\n\t\t}\n\t}()\n\n\tlog.Printf(\"Waiting for up to %d seconds for instance to become %s\", timeout/time.Second, desiredState)\n\tselect {\n\tcase err := <-result:\n\t\treturn err\n\tcase <-time.After(timeout):\n\t\terr := fmt.Errorf(\"Timeout while waiting to for instance to become '%s'\", desiredState)\n\t\treturn err\n\t}\n}", "func waitForCRDs(s *state.State) error {\n\tcondFn := clientutil.CRDsReadyCondition(s.Context, s.DynamicClient, CRDNames())\n\terr := wait.PollUntilContextTimeout(s.Context, 5*time.Second, 3*time.Minute, false, condFn.WithContext())\n\n\treturn fail.KubeClient(err, \"waiting for machine-controller CRDs to became ready\")\n}", "func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error {\n\treturn c.WaitUntilSystemStatusOkWithContext(aws.BackgroundContext(), input)\n}", "func (rcc *rotateCertsCmd) waitForControlPlaneReadiness() error {\n\tlog.Info(\"Checking health of control plane components\")\n\tpods := make([]string, 0)\n\tfor _, n := range rcc.cs.Properties.GetMasterVMNameList() {\n\t\tfor _, c := range []string{kubeAddonManager, kubeAPIServer, kubeControllerManager, kubeScheduler} {\n\t\t\tpods = append(pods, fmt.Sprintf(\"%s-%s\", c, n))\n\t\t}\n\t}\n\tif err := ops.WaitForReady(rcc.kubeClient, metav1.NamespaceSystem, pods, rotateCertsDefaultInterval, rotateCertsDefaultTimeout, rcc.nodes); err != nil {\n\t\treturn errors.Wrap(err, \"waiting for control plane containers to reach the Ready state within the timeout period\")\n\t}\n\treturn nil\n}", "func waitForSimulator() error {\n\tconst sleepPeriodSeconds = 2\n\tconst tries = 30\n\n\tfor i := 1; i <= tries; i++ {\n\t\tclient, clientErr := env.RAN().NewRANC1ServiceClient()\n\n\t\tif clientErr != nil {\n\t\t\treturn clientErr\n\t\t}\n\n\t\trequest := &nb.StationListRequest{\n\t\t\tSubscribe: false,\n\t\t}\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tstations, stationsErr := client.ListStations(ctx, request)\n\n\t\tif stationsErr != nil {\n\t\t\tcancel()\n\t\t\treturn stationsErr\n\t\t}\n\n\t\t_, pollError := stations.Recv()\n\t\tcancel()\n\t\tif pollError != nil && pollError != io.EOF {\n\t\t\treturn pollError\n\t\t}\n\t\tif pollError == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(sleepPeriodSeconds * time.Second)\n\t}\n\n\treturn errors.New(\"simulator never responded properly\")\n}", "func waitForApiServerToBeUp(svcMasterIp string, sshClientConfig *ssh.ClientConfig,\n\ttimeout time.Duration) error {\n\tkubeConfigPath := GetAndExpectStringEnvVar(gcKubeConfigPath)\n\twaitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {\n\t\tcmd := fmt.Sprintf(\"kubectl get ns,sc --kubeconfig %s\",\n\t\t\tkubeConfigPath)\n\t\tframework.Logf(\"Invoking command '%v' on host %v\", cmd,\n\t\t\tsvcMasterIp)\n\t\tcmdResult, err := sshExec(sshClientConfig, svcMasterIp,\n\t\t\tcmd)\n\t\tframework.Logf(\"result %v\", cmdResult)\n\t\tif err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif err == nil {\n\t\t\tframework.Logf(\"Apiserver is fully up\")\n\t\t\treturn 
true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\treturn waitErr\n}", "func waitForAWSInfra(restConfig *rest.Config, clustername string) (bool, error) {\n\t// We need to load the scheme since it's not part of the core API\n\tlog.Info(\"Waiting for Infrastructure\")\n\tscheme := runtime.NewScheme()\n\terr := clusterv1.AddToScheme(scheme)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tc, err := client.New(restConfig, client.Options{\n\t\tScheme: scheme,\n\t})\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// wait up until 40 minutes\n\tcounter := 0\n\tfor runs := 20; counter <= runs; counter++ {\n\t\tif counter > runs {\n\t\t\treturn false, errors.New(\"aws infra did not come up after 40 minutes\")\n\t\t}\n\t\t// get the current status, wait for \"Provisioned\"\n\t\tcluster := &clusterv1.Cluster{}\n\t\tif err := c.Get(context.TODO(), client.ObjectKey{Namespace: \"default\", Name: clustername}, cluster); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif cluster.Status.Phase == \"Provisioned\" {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Minute)\n\n\t}\n\treturn true, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
waitForWebhook waits for machine-controller-webhook to become running
func waitForWebhook(ctx context.Context, client dynclient.Client) error {
	condFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{
		Namespace: resources.MachineControllerNameSpace,
		LabelSelector: labels.SelectorFromSet(map[string]string{
			appLabelKey: resources.MachineControllerWebhookName,
		}),
	})

	return fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), "waiting for machine-controller webhook to become ready")
}
[ "func waitForWebhook(ctx context.Context, log *zap.SugaredLogger, client ctrlruntimeclient.Client, kubermaticNamespace string) error {\n\t// wait for the webhook to be ready\n\ttimeout := 30 * time.Second\n\tendpoint := types.NamespacedName{Namespace: kubermaticNamespace, Name: \"seed-webhook\"}\n\n\tlog.Infow(\"waiting for webhook to be ready...\", \"webhook\", endpoint, \"timeout\", timeout)\n\tif err := wait.Poll(500*time.Millisecond, timeout, func() (bool, error) {\n\t\tendpoints := &corev1.Endpoints{}\n\t\tif err := client.Get(ctx, endpoint, endpoints); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn len(endpoints.Subsets) > 0, nil\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to wait for webhook: %v\", err)\n\t}\n\tlog.Info(\"webhook is ready\")\n\n\treturn nil\n}", "func Test_Webhook_Failures(t *testing.T) {\n\tprojectName := \"webhooks-b\"\n\tserviceName := \"myservice\"\n\n\tdefer func(t *testing.T) {\n\t\tPrintLogsOfPods(t, []string{\"webhook-service\", \"resource-service\", \"shipyard-controller\"})\n\t}(t)\n\n\tprojectName, shipyardFilePath := CreateWebhookProject(t, projectName, serviceName)\n\tdefer DeleteFile(t, shipyardFilePath)\n\tstageName := \"dev\"\n\tsequencename := \"mysequence\"\n\t// create subscriptions for the webhook-service\n\ttaskTypes := []string{\"mytask\", \"mytask-finished\", \"othertask\", \"unallowedtask\", \"unknowntask\", \"failedtask\", \"loopback\", \"loopback2\", \"loopback3\"}\n\n\twebhookYamlWithSubscriptionIDs := webhookConfig\n\twebhookYamlWithSubscriptionIDs = getWebhookYamlWithSubscriptionIDs(t, taskTypes, projectName, webhookYamlWithSubscriptionIDs)\n\n\t// wait some time to make sure the webhook service has pulled the updated subscription\n\t<-time.After(20 * time.Second) // sorry :(\n\n\t// now, let's add an webhook.yaml file to our service\n\twebhookFilePath, err := CreateTmpFile(\"webhook.yaml\", webhookYamlWithSubscriptionIDs)\n\trequire.Nil(t, err)\n\tdefer func() {\n\t\terr := os.Remove(webhookFilePath)\n\t\tif err != nil {\n\t\t\tt.Logf(\"Could not delete tmp file: %s\", err.Error())\n\t\t}\n\n\t}()\n\n\tt.Log(\"Adding webhook.yaml to our service\")\n\t_, err = ExecuteCommand(fmt.Sprintf(\"keptn add-resource --project=%s --service=%s --resource=%s --resourceUri=webhook/webhook.yaml --all-stages\", projectName, serviceName, webhookFilePath))\n\n\trequire.Nil(t, err)\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent := func(sequencename, taskFinishedType string, verify func(t *testing.T, decodedEvent map[string]interface{})) {\n\t\tt.Logf(\"triggering sequence %s in stage %s\", sequencename, stageName)\n\t\tkeptnContextID, _ := TriggerSequence(projectName, serviceName, stageName, sequencename, nil)\n\n\t\tvar taskFinishedEvent *models.KeptnContextExtendedCE\n\t\trequire.Eventually(t, func() bool {\n\t\t\ttaskFinishedEvent, err = GetLatestEventOfType(keptnContextID, projectName, stageName, keptnv2.GetFinishedEventType(taskFinishedType))\n\t\t\tif err != nil || taskFinishedEvent == nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t}, 60*time.Second, 5*time.Second)\n\n\t\trequire.NotNil(t, taskFinishedEvent)\n\n\t\tdecodedEvent := map[string]interface{}{}\n\n\t\terr = keptnv2.EventDataAs(*taskFinishedEvent, &decodedEvent)\n\t\trequire.Nil(t, err)\n\n\t\tverify(t, decodedEvent)\n\n\t\t// verify that no <task>.finished.finished event is sent\n\t\tfinishedFinishedEvent, err := GetLatestEventOfType(keptnContextID, projectName, stageName, keptnv2.GetFinishedEventType(\"mytask.finished\"))\n\t\trequire.Nil(t, 
err)\n\t\trequire.Nil(t, finishedFinishedEvent)\n\t}\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"mytask\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\trequire.NotNil(t, decodedEvent[\"mytask\"])\n\t})\n\n\t// Now, trigger another sequence that tries to execute a webhook with a reference to an unknown variable - this should fail\n\tsequencename = \"othersequence\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"othertask\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, decodedEvent[\"othertask\"])\n\t})\n\n\t// Now, trigger another sequence that tries to execute a webhook with a call to the kubernetes API - this one should fail as well\n\tsequencename = \"unallowedsequence\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"unallowedtask\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, decodedEvent[\"unallowedtask\"])\n\t})\n\n\t// Now, trigger another sequence that tries to execute a webhook with a call to the localhost - this one should fail as well\n\tsequencename = \"loopbacksequence\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"loopback\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, decodedEvent[\"loopback\"])\n\t})\n\n\t// Now, trigger another sequence that tries to execute a webhook with a call to the 127.0.0.1 - this one should fail as well\n\tsequencename = \"loopbacksequence2\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"loopback2\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, decodedEvent[\"loopback\"])\n\t})\n\n\t// Now, trigger another sequence that tries to execute a webhook with a call to the 127.0.0.1 - this one should fail as well\n\tsequencename = \"loopbacksequence3\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"loopback3\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, decodedEvent[\"loopback\"])\n\t})\n\n\t// Now, trigger another sequence that contains a task for which we don't have a webhook configured - this one should fail as well\n\tsequencename = \"sequencewithunknowntask\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"unknowntask\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, 
decodedEvent[\"unknowntask\"])\n\t})\n\n\t// Now, trigger another sequence that contains a task which results in a HTTP error status\n\tsequencename = \"failedsequence\"\n\n\ttriggerSequenceAndVerifyTaskFinishedEvent(sequencename, \"failedtask\", func(t *testing.T, decodedEvent map[string]interface{}) {\n\t\t// check the result - this time it should be set to fail because an unknown Key was referenced in the webhook\n\t\trequire.Equal(t, string(keptnv2.ResultFailed), decodedEvent[\"result\"])\n\t\trequire.Nil(t, decodedEvent[\"failedtask\"])\n\t})\n}", "func waitForRunning(client *clientv3.Client, pod string, ctx context.Context, modRev int64) {\n\twatcher := clientv3.NewWatcher(client)\n\trch := watcher.Watch(ctx, pod, clientv3.WithRev(modRev))\n\tconst runningPhase = \"Running\"\n\tfor wresp := range rch {\n\t\tfor _, ev := range wresp.Events {\n\t\t\tjq := utils.GetJsonqQuery(ev.Kv.Value)\n\t\t\tif phase, _ := jq.String(\"status\", \"phase\"); phase == runningPhase {\n\t\t\t\twatcher.Close()\n\t\t\t}\n\t\t}\n\t}\n}", "func waitFor(ctx context.Context, eventName string) error {\n\t// TODO: timeout setting\n\tch := make(chan struct{})\n\tcctx, cancel := context.WithCancel(ctx)\n\tdebugf(`Wait for \"%s\"`, eventName)\n\tchromedp.ListenTarget(cctx, func(ev interface{}) {\n\t\tswitch e := ev.(type) {\n\t\tcase *page.EventLifecycleEvent:\n\t\t\tdebugf(\"Received lifecycle event: %s\", e.Name)\n\t\t\tif e.Name == eventName {\n\t\t\t\tcancel()\n\t\t\t\tclose(ch)\n\t\t\t}\n\t\t}\n\t})\n\tselect {\n\tcase <-ch:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\n}", "func waitFor(ctx context.Context, eventName string) error {\n\tch := make(chan struct{})\n\tcctx, cancel := context.WithCancel(ctx)\n\tchromedp.ListenTarget(cctx, func(ev interface{}) {\n\t\tswitch e := ev.(type) {\n\t\tcase *page.EventLifecycleEvent:\n\t\t\tif e.Name == eventName {\n\t\t\t\tcancel()\n\t\t\t\tclose(ch)\n\t\t\t}\n\t\t}\n\t})\n\n\tselect {\n\tcase <-ch:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func waitFor(ctx context.Context, eventName string) error {\n\tch := make(chan struct{})\n\tcctx, cancel := context.WithCancel(ctx)\n\tchromedp.ListenTarget(cctx, func(ev interface{}) {\n\t\tswitch e := ev.(type) {\n\t\tcase *page.EventLifecycleEvent:\n\t\t\tif e.Name == eventName {\n\t\t\t\tcancel()\n\t\t\t\tclose(ch)\n\t\t\t}\n\t\t}\n\t})\n\tselect {\n\tcase <-ch:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\n}", "func waitForAPI(ctx context.Context, client *gophercloud.ServiceClient) {\n\thttpClient := &http.Client{\n\t\tTimeout: 5 * time.Second,\n\t}\n\n\t// NOTE: Some versions of Ironic inspector returns 404 for /v1/ but 200 for /v1,\n\t// which seems to be the default behavior for Flask. 
Remove the trailing slash\n\t// from the client endpoint.\n\tendpoint := strings.TrimSuffix(client.Endpoint, \"/\")\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tlog.Printf(\"[DEBUG] Waiting for API to become available...\")\n\n\t\t\tr, err := httpClient.Get(endpoint)\n\t\t\tif err == nil {\n\t\t\t\tstatusCode := r.StatusCode\n\t\t\t\tr.Body.Close()\n\t\t\t\tif statusCode == http.StatusOK {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t}\n\t}\n}", "func WaitForWebhooks(config *rest.Config,\n\tmutatingWebhooks []*admissionv1.MutatingWebhookConfiguration,\n\tvalidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration,\n\toptions WebhookInstallOptions) error {\n\twaitingFor := map[schema.GroupVersionKind]*sets.Set[string]{}\n\n\tfor _, hook := range mutatingWebhooks {\n\t\th := hook\n\t\tgvk, err := apiutil.GVKForObject(h, scheme.Scheme)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to get gvk for MutatingWebhookConfiguration %s: %w\", hook.GetName(), err)\n\t\t}\n\n\t\tif _, ok := waitingFor[gvk]; !ok {\n\t\t\twaitingFor[gvk] = &sets.Set[string]{}\n\t\t}\n\t\twaitingFor[gvk].Insert(h.GetName())\n\t}\n\n\tfor _, hook := range validatingWebhooks {\n\t\th := hook\n\t\tgvk, err := apiutil.GVKForObject(h, scheme.Scheme)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to get gvk for ValidatingWebhookConfiguration %s: %w\", hook.GetName(), err)\n\t\t}\n\n\t\tif _, ok := waitingFor[gvk]; !ok {\n\t\t\twaitingFor[gvk] = &sets.Set[string]{}\n\t\t}\n\t\twaitingFor[gvk].Insert(hook.GetName())\n\t}\n\n\t// Poll until all resources are found in discovery\n\tp := &webhookPoller{config: config, waitingFor: waitingFor}\n\treturn wait.PollImmediate(options.PollInterval, options.MaxTime, p.poll)\n}", "func (c *config) handleWebhook(w http.ResponseWriter, req *http.Request) {\n\tif c.verbose {\n\t\tlog.Println(\"Webhook triggered\")\n\t}\n\tdata, err := io.ReadAll(req.Body)\n\tif err != nil {\n\t\thandleError(w, err)\n\t\tc.errCounter.WithLabelValues(\"read\").Inc()\n\t\treturn\n\t}\n\n\tif c.verbose {\n\t\tlog.Println(\"Body:\", string(data))\n\t}\n\tpayload := &template.Data{}\n\tif err := json.Unmarshal(data, payload); err != nil {\n\t\thandleError(w, err)\n\t\tc.errCounter.WithLabelValues(\"unmarshal\").Inc()\n\t\treturn\n\t}\n\tif c.verbose {\n\t\tlog.Printf(\"Got: %#v\", payload)\n\t}\n\n\tc.processCurrent.Inc()\n\tstart := time.Now()\n\terr = run(c.command, c.args, amDataToEnv(payload))\n\tc.processDuration.Observe(time.Since(start).Seconds())\n\tc.processCurrent.Dec()\n\tif err != nil {\n\t\thandleError(w, err)\n\t\tc.errCounter.WithLabelValues(\"start\").Inc()\n\t}\n}", "func waitForMachineController(ctx context.Context, client dynclient.Client) error {\n\tcondFn := clientutil.PodsReadyCondition(ctx, client, dynclient.ListOptions{\n\t\tNamespace: resources.MachineControllerNameSpace,\n\t\tLabelSelector: labels.SelectorFromSet(map[string]string{\n\t\t\tappLabelKey: resources.MachineControllerName,\n\t\t}),\n\t})\n\n\treturn fail.KubeClient(wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, false, condFn.WithContext()), \"waiting for machine-controller to became ready\")\n}", "func waitWebhookConfigurationReady(f *framework.Framework, namespace string) error {\n\tcmClient := f.VclusterClient.CoreV1().ConfigMaps(namespace + \"-markers\")\n\treturn wait.PollUntilContextTimeout(f.Context, 100*time.Millisecond, 30*time.Second, true, func(ctx context.Context) (bool, error) {\n\t\tmarker := 
&corev1.ConfigMap{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: string(uuid.NewUUID()),\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\tuniqueName: \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t_, err := cmClient.Create(ctx, marker, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\t// The always-deny webhook does not provide a reason, so check for the error string we expect\n\t\t\tif strings.Contains(err.Error(), \"denied\") {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\treturn false, err\n\t\t}\n\t\t// best effort cleanup of markers that are no longer needed\n\t\t_ = cmClient.Delete(ctx, marker.GetName(), metav1.DeleteOptions{})\n\t\tf.Log.Infof(\"Waiting for webhook configuration to be ready...\")\n\t\treturn false, nil\n\t})\n}", "func (mod *Module)WaitTillStartupDone(){\n\tfor mod.hasRunStartup == false{//wait for startup to stop running\n\t\ttime.Sleep(time.Millisecond*10)\n\t}\n}", "func WaitReady(ctx *util.Context) error {\n\tif !ctx.Cluster.MachineController.Deploy {\n\t\treturn nil\n\t}\n\n\tctx.Logger.Infoln(\"Waiting for machine-controller to come up…\")\n\n\t// Wait a bit to let scheduler to react\n\ttime.Sleep(10 * time.Second)\n\n\tif err := WaitForWebhook(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller-webhook did not come up\")\n\t}\n\n\tif err := WaitForMachineController(ctx.DynamicClient); err != nil {\n\t\treturn errors.Wrap(err, \"machine-controller did not come up\")\n\t}\n\treturn nil\n}", "func (bot *Bot) startWebhook() (err error) {\n\terr = bot.createServer()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"startWebhook: %w\", err)\n\t}\n\n\tbot.err = make(chan error)\n\n\tgo func() {\n\t\tbot.err <- bot.webhook.Start()\n\t}()\n\n\terr = bot.DeleteWebhook()\n\tif err != nil {\n\t\tlog.Println(\"startWebhook:\", err.Error())\n\t}\n\n\terr = bot.setWebhook()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"startWebhook: %w\", err)\n\t}\n\n\treturn <-bot.err\n}", "func (c *Client) ProvisionMachineWait(ipAddress string) (result *string, err error) {\n\n\tuptimeMap := uptimeCommand(ipAddress)\n\n\t// Marshall the parlay submission (runs the uptime command)\n\tb, err := json.Marshal(uptimeMap)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Create the string that will be used to get the logs\n\tdashAddress := strings.Replace(ipAddress, \".\", \"-\", -1)\n\n\t// Get the time\n\tt := time.Now()\n\t//r.Recorder.Eventf(plunderMachine, corev1.EventTypeNormal, \"PlunderProvision\", \"Plunder has begun provisioning the Operating System\")\n\n\tfor {\n\t\t// Set Parlay API path and POST\n\t\tep, resp := apiserver.FindFunctionEndpoint(c.address, c.server, \"parlay\", http.MethodPost)\n\t\tif resp.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\t\tc.address.Path = ep.Path\n\n\t\tresponse, err := apiserver.ParsePlunderPost(c.address, c.server, b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If an error has been returned then handle the error gracefully and terminate\n\t\tif response.FriendlyError != \"\" || response.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\n\t\t// Sleep for five seconds\n\t\ttime.Sleep(5 * time.Second)\n\n\t\t// Set the parlay API get logs path and GET\n\t\tep, resp = apiserver.FindFunctionEndpoint(c.address, c.server, \"parlayLog\", http.MethodGet)\n\t\tif resp.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\t\tc.address.Path = ep.Path + \"/\" + dashAddress\n\n\t\tresponse, err = apiserver.ParsePlunderGet(c.address, c.server)\n\t\tif err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t\t// If an error has been returned then handle the error gracefully and terminate\n\t\tif response.FriendlyError != \"\" || response.Error != \"\" {\n\t\t\treturn nil, fmt.Errorf(resp.Error)\n\n\t\t}\n\n\t\tvar logs plunderlogging.JSONLog\n\n\t\terr = json.Unmarshal(response.Payload, &logs)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif logs.State == \"Completed\" {\n\t\t\tprovisioningResult := fmt.Sprintf(\"Host has been succesfully provisioned OS in %s Seconds\\n\", time.Since(t).Round(time.Second))\n\t\t\t//r.Recorder.Eventf(plunderMachine, corev1.EventTypeNormal, \"PlunderProvision\", provisioningResult)\n\n\t\t\treturn &provisioningResult, nil\n\t\t}\n\t}\n\t//return nil, fmt.Errorf(\"TODO - this should never happen\")\n}", "func waitForEvent(t *testing.T, wsc *client.WSClient, eventid string, dieOnTimeout bool, f func(), check func(string, interface{}) error) {\n\t// go routine to wait for webscoket msg\n\tgoodCh := make(chan interface{})\n\terrCh := make(chan error)\n\n\t// Read message\n\tgo func() {\n\t\tvar err error\n\tLOOP:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-wsc.ResultsCh:\n\t\t\t\tresult := new(ctypes.TMResult)\n\t\t\t\twire.ReadJSONPtr(result, r, &err)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrCh <- err\n\t\t\t\t\tbreak LOOP\n\t\t\t\t}\n\t\t\t\tevent, ok := (*result).(*ctypes.ResultEvent)\n\t\t\t\tif ok && event.Name == eventid {\n\t\t\t\t\tgoodCh <- event.Data\n\t\t\t\t\tbreak LOOP\n\t\t\t\t}\n\t\t\tcase err := <-wsc.ErrorsCh:\n\t\t\t\terrCh <- err\n\t\t\t\tbreak LOOP\n\t\t\tcase <-wsc.Quit:\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t}\n\t}()\n\n\t// do stuff (transactions)\n\tf()\n\n\t// wait for an event or timeout\n\ttimeout := time.NewTimer(10 * time.Second)\n\tselect {\n\tcase <-timeout.C:\n\t\tif dieOnTimeout {\n\t\t\twsc.Stop()\n\t\t\tpanic(Fmt(\"%s event was not received in time\", eventid))\n\t\t}\n\t\t// else that's great, we didn't hear the event\n\t\t// and we shouldn't have\n\tcase eventData := <-goodCh:\n\t\tif dieOnTimeout {\n\t\t\t// message was received and expected\n\t\t\t// run the check\n\t\t\tif err := check(eventid, eventData); err != nil {\n\t\t\t\tpanic(err) // Show the stack trace.\n\t\t\t}\n\t\t} else {\n\t\t\twsc.Stop()\n\t\t\tpanic(Fmt(\"%s event was not expected\", eventid))\n\t\t}\n\tcase err := <-errCh:\n\t\tpanic(err) // Show the stack trace.\n\n\t}\n}", "func (c *Config) waitForFlannelFile(newLogger micrologger.Logger) error {\n\t// wait for file creation\n\tfor count := 0; ; count++ {\n\t\t// don't wait forever, if file is not created within retry limit, exit with failure\n\t\tif count > MaxRetry {\n\t\t\treturn microerror.Maskf(invalidFlannelFileError, \"After 100sec flannel file is not created. 
Exiting\")\n\t\t}\n\t\t// check if file exists\n\t\tif _, err := os.Stat(c.Flag.Service.FlannelFile); !os.IsNotExist(err) {\n\t\t\tbreak\n\t\t}\n\t\t_ = newLogger.Log(\"debug\", fmt.Sprintf(\"Waiting for file '%s' to be created.\", c.Flag.Service.FlannelFile))\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\t// all good\n\treturn nil\n}", "func waitForMachineState(api *cloudapi.Client, id, state string, timeout time.Duration) error {\n\treturn waitFor(\n\t\tfunc() (bool, error) {\n\t\t\tcurrentState, err := readMachineState(api, id)\n\t\t\treturn currentState == state, err\n\t\t},\n\t\tmachineStateChangeCheckInterval,\n\t\tmachineStateChangeTimeout,\n\t)\n}", "func waitForHelmRunning(ctx context.Context, configPath string) error {\n\tfor {\n\t\tcmd := exec.Command(\"helm\", \"ls\", \"--kubeconfig\", configPath)\n\t\tvar out bytes.Buffer\n\t\tcmd.Stderr = &out\n\t\tcmd.Run()\n\t\tif out.String() == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn errors.Wrap(ctx.Err(), \"timed out waiting for helm to become ready\")\n\t\tcase <-time.After(5 * time.Second):\n\t\t}\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultKeymap returns a copy of the default Keymap. Useful if inspection/customization is needed.
func DefaultKeymap() Keymap {
	return Keymap{
		ansi.NEWLINE:         (*Core).Enter,
		ansi.CARRIAGE_RETURN: (*Core).Enter,
		ansi.CTRL_C:          (*Core).Interrupt,
		ansi.CTRL_D:          (*Core).DeleteOrEOF,
		ansi.CTRL_H:          (*Core).Backspace,
		ansi.BACKSPACE:       (*Core).Backspace,
		ansi.CTRL_L:          (*Core).Clear,
		ansi.CTRL_T:          (*Core).SwapChars,
		ansi.CTRL_B:          (*Core).MoveLeft,
		ansi.CTRL_F:          (*Core).MoveRight,
		ansi.CTRL_P:          (*Core).HistoryBack,
		ansi.CTRL_N:          (*Core).HistoryForward,
		ansi.CTRL_U:          (*Core).CutLineLeft,
		ansi.CTRL_K:          (*Core).CutLineRight,
		ansi.CTRL_A:          (*Core).MoveBeginning,
		ansi.CTRL_E:          (*Core).MoveEnd,
		ansi.CTRL_W:          (*Core).CutPrevWord,
		ansi.CTRL_Y:          (*Core).Paste,

		// Escape sequences
		ansi.START_ESCAPE_SEQ: nil,
		ansi.META_B:           (*Core).MoveWordLeft,
		ansi.META_LEFT:        (*Core).MoveWordLeft,
		ansi.META_F:           (*Core).MoveWordRight,
		ansi.META_RIGHT:       (*Core).MoveWordRight,
		ansi.LEFT:             (*Core).MoveLeft,
		ansi.RIGHT:            (*Core).MoveRight,
		ansi.UP:               (*Core).HistoryBack,
		ansi.DOWN:             (*Core).HistoryForward,

		// Extended escape
		ansi.START_EXTENDED_ESCAPE_SEQ:   nil,
		ansi.START_EXTENDED_ESCAPE_SEQ_3: nil,
		ansi.DELETE:                      (*Core).Delete, // Delete key
	}
}
[ "func NewDefaultKeyMap() *KeyMap {\n\treturn &KeyMap{\n\t\tYes: []string{\"y\", \"Y\"},\n\t\tNo: []string{\"n\", \"N\"},\n\t\tSelectYes: []string{\"left\"},\n\t\tSelectNo: []string{\"right\"},\n\t\tToggle: []string{\"tab\"},\n\t\tSubmit: []string{\"enter\"},\n\t\tAbort: []string{\"ctrl+c\"},\n\t}\n}", "func DefaultFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"go\": ToGo,\n\t\t\"goPrivate\": ToGoPrivate,\n\t\t\"lcFirst\": LcFirst,\n\t\t\"ucFirst\": UcFirst,\n\t}\n}", "func NewDefaultMap[K comparable, V any](f func(K) V) DefaultMap[K, V] {\n\treturn DefaultMap[K, V]{map[K]V{}, f}\n}", "func DefaultClusterMap() *ClusterMap {\n\treturn &ClusterMap{\n\t\tMap: make(map[string]*Cluster),\n\t}\n}", "func (e *Env) DefineDefaultMap(k string) (interface{}, error) {\n\tv := make(map[interface{}]interface{})\n\treturn v, e.Define(k, v)\n}", "func (manager *KeysManager) DefaultKey() *jose.JSONWebKey {\n\tif len(manager.KeyList) > 0 {\n\t\treturn manager.KeyList[0]\n\t} else {\n\t\treturn nil\n\t}\n}", "func DefaultMapper() *MapConvert {\n\tonce.Do(func() {\n\t\tv := viper.New()\n\t\tv.SetConfigType(\"yaml\")\n\t\tv.ReadConfig(defaultMappings())\n\t\tdefaultMap = &MapConvert{}\n\t\tif v.IsSet(\"gc_types\") {\n\t\t\tdefaultMap.GCTypes = gcTypes(v, make([]string, 0))\n\t\t}\n\t\tif v.IsSet(\"memory_bytes\") {\n\t\t\tdefaultMap.MemoryTypes = memoryTypes(v, make(map[string]string))\n\t\t}\n\t})\n\treturn defaultMap\n}", "func NewDefaultIDSetMap() *DefaultIDSetMap {\n\treturn &DefaultIDSetMap{}\n}", "func (i GinJwtSignAlgorithm) KeyMap() map[GinJwtSignAlgorithm]string {\n\treturn _GinJwtSignAlgorithmValueToKeyMap\n}", "func NewMapEngineDefault() *MapEngine {\n\tindex := NewS2Storage(17, 35)\n\treturn &MapEngine{\n\t\tedges: make(map[int64]map[int64]*Edge),\n\t\tvertices: make(map[int64]*Vertex),\n\t\ts2Storage: index,\n\t}\n}", "func DefaultsToMap() common.StringMap {\n\tcurrentDefaults = Defaults()\n\tif currentDefaults.ShellPath == \"\" {\n\t\ttempPath, err := common.GetBashPath(\"\")\n\t\tif err == nil {\n\t\t\tcurrentDefaults.ShellPath = tempPath\n\t\t} else {\n\t\t\tcurrentDefaults.ShellPath = globals.ShellPathValue\n\t\t}\n\t}\n\treturn common.StringMap{\n\t\t\"Version\": currentDefaults.Version,\n\t\t\"version\": currentDefaults.Version,\n\t\t\"SandboxHome\": currentDefaults.SandboxHome,\n\t\t\"sandbox-home\": currentDefaults.SandboxHome,\n\t\t\"SandboxBinary\": currentDefaults.SandboxBinary,\n\t\t\"sandbox-binary\": currentDefaults.SandboxBinary,\n\t\t\"UseSandboxCatalog\": currentDefaults.UseSandboxCatalog,\n\t\t\"use-sandbox-catalog\": currentDefaults.UseSandboxCatalog,\n\t\t\"LogSBOperations\": currentDefaults.LogSBOperations,\n\t\t\"log-sb-operations\": currentDefaults.LogSBOperations,\n\t\t\"LogDirectory\": currentDefaults.LogDirectory,\n\t\t\"log-directory\": currentDefaults.LogDirectory,\n\t\t\"ShellPath\": currentDefaults.ShellPath,\n\t\t\"shell-path\": currentDefaults.ShellPath,\n\t\t\"CookbookDirectory\": currentDefaults.CookbookDirectory,\n\t\t\"cookbook-directory\": currentDefaults.CookbookDirectory,\n\t\t\"MasterSlaveBasePort\": currentDefaults.MasterSlaveBasePort,\n\t\t\"master-slave-base-port\": currentDefaults.MasterSlaveBasePort,\n\t\t\"GroupReplicationBasePort\": currentDefaults.GroupReplicationBasePort,\n\t\t\"group-replication-base-port\": currentDefaults.GroupReplicationBasePort,\n\t\t\"GroupReplicationSpBasePort\": currentDefaults.GroupReplicationSpBasePort,\n\t\t\"group-replication-sp-base-port\": 
currentDefaults.GroupReplicationSpBasePort,\n\t\t\"FanInReplicationBasePort\": currentDefaults.FanInReplicationBasePort,\n\t\t\"fan-in-replication-base-port\": currentDefaults.FanInReplicationBasePort,\n\t\t\"AllMastersReplicationBasePort\": currentDefaults.AllMastersReplicationBasePort,\n\t\t\"all-masters-replication-base-port\": currentDefaults.AllMastersReplicationBasePort,\n\t\t\"MultipleBasePort\": currentDefaults.MultipleBasePort,\n\t\t\"multiple-base-port\": currentDefaults.MultipleBasePort,\n\t\t\"PxcBasePort\": currentDefaults.PxcBasePort,\n\t\t\"pxc-base-port\": currentDefaults.PxcBasePort,\n\t\t\"NdbBasePort\": currentDefaults.NdbBasePort,\n\t\t\"ndb-base-port\": currentDefaults.NdbBasePort,\n\t\t\"NdbClusterPort\": currentDefaults.NdbClusterPort,\n\t\t\"ndb-cluster-port\": currentDefaults.NdbClusterPort,\n\t\t\"GroupPortDelta\": currentDefaults.GroupPortDelta,\n\t\t\"group-port-delta\": currentDefaults.GroupPortDelta,\n\t\t\"MysqlXPortDelta\": currentDefaults.MysqlXPortDelta,\n\t\t\"mysqlx-port-delta\": currentDefaults.MysqlXPortDelta,\n\t\t\"AdminPortDelta\": currentDefaults.AdminPortDelta,\n\t\t\"admin-port-delta\": currentDefaults.AdminPortDelta,\n\t\t\"MasterName\": currentDefaults.MasterName,\n\t\t\"master-name\": currentDefaults.MasterName,\n\t\t\"MasterAbbr\": currentDefaults.MasterAbbr,\n\t\t\"master-abbr\": currentDefaults.MasterAbbr,\n\t\t\"NodePrefix\": currentDefaults.NodePrefix,\n\t\t\"node-prefix\": currentDefaults.NodePrefix,\n\t\t\"SlavePrefix\": currentDefaults.SlavePrefix,\n\t\t\"slave-prefix\": currentDefaults.SlavePrefix,\n\t\t\"SlaveAbbr\": currentDefaults.SlaveAbbr,\n\t\t\"slave-abbr\": currentDefaults.SlaveAbbr,\n\t\t\"SandboxPrefix\": currentDefaults.SandboxPrefix,\n\t\t\"sandbox-prefix\": currentDefaults.SandboxPrefix,\n\t\t\"ImportedSandboxPrefix\": currentDefaults.ImportedSandboxPrefix,\n\t\t\"imported-sandbox-prefix\": currentDefaults.ImportedSandboxPrefix,\n\t\t\"MasterSlavePrefix\": currentDefaults.MasterSlavePrefix,\n\t\t\"master-slave-prefix\": currentDefaults.MasterSlavePrefix,\n\t\t\"GroupPrefix\": currentDefaults.GroupPrefix,\n\t\t\"group-prefix\": currentDefaults.GroupPrefix,\n\t\t\"GroupSpPrefix\": currentDefaults.GroupSpPrefix,\n\t\t\"group-sp-prefix\": currentDefaults.GroupSpPrefix,\n\t\t\"MultiplePrefix\": currentDefaults.MultiplePrefix,\n\t\t\"multiple-prefix\": currentDefaults.MultiplePrefix,\n\t\t\"FanInPrefix\": currentDefaults.FanInPrefix,\n\t\t\"fan-in-prefix\": currentDefaults.FanInPrefix,\n\t\t\"AllMastersPrefix\": currentDefaults.AllMastersPrefix,\n\t\t\"all-masters-prefix\": currentDefaults.AllMastersPrefix,\n\t\t\"ReservedPorts\": currentDefaults.ReservedPorts,\n\t\t\"reserved-ports\": currentDefaults.ReservedPorts,\n\t\t\"RemoteRepository\": currentDefaults.RemoteRepository,\n\t\t\"remote-repository\": currentDefaults.RemoteRepository,\n\t\t\"RemoteIndexFile\": currentDefaults.RemoteIndexFile,\n\t\t\"remote-index-file\": currentDefaults.RemoteIndexFile,\n\t\t\"RemoteCompletionUrl\": currentDefaults.RemoteCompletionUrl,\n\t\t\"remote-completion-url\": currentDefaults.RemoteCompletionUrl,\n\t\t\"RemoteTarballUrl\": currentDefaults.RemoteTarballUrl,\n\t\t\"remote-tarball-url\": currentDefaults.RemoteTarballUrl,\n\t\t\"remote-tarballs\": currentDefaults.RemoteTarballUrl,\n\t\t\"remote-github\": currentDefaults.RemoteTarballUrl,\n\t\t\"PxcPrefix\": currentDefaults.PxcPrefix,\n\t\t\"pxc-prefix\": currentDefaults.PxcPrefix,\n\t\t\"NdbPrefix\": currentDefaults.NdbPrefix,\n\t\t\"ndb-prefix\": 
currentDefaults.NdbPrefix,\n\t\t\"DefaultSandboxExecutable\": currentDefaults.DefaultSandboxExecutable,\n\t\t\"default-sandbox-executable\": currentDefaults.DefaultSandboxExecutable,\n\t\t\"download-url\": currentDefaults.DownloadUrl,\n\t\t\"DownloadUrl\": currentDefaults.DownloadUrl,\n\t\t\"download-name-macos\": currentDefaults.DownloadNameMacOs,\n\t\t\"DownloadNameMacOs\": currentDefaults.DownloadNameMacOs,\n\t\t\"download-name-linux\": currentDefaults.DownloadNameLinux,\n\t\t\"DownloadNameLinux\": currentDefaults.DownloadNameLinux,\n\t\t\"Timestamp\": currentDefaults.Timestamp,\n\t\t\"timestamp\": currentDefaults.Timestamp,\n\t}\n}", "func DefaultKeyGetter(c *gin.Context) (string, bool) {\n\treturn c.ClientIP(), true\n}", "func DefaultKeyFilters() []string {\n\treturn []string{\"^shardVersion$\"}\n}", "func (s *encryptionAccess) setDefaultKey(defaultKey *storj.Key) {\n\ts.store.SetDefaultKey(defaultKey)\n}", "func KeyToDefaultPath(key Key) (string, error) {\n\treturn key.defaultPath()\n}", "func (i SNSProtocol) KeyMap() map[SNSProtocol]string {\n\treturn _SNSProtocolValueToKeyMap\n}", "func DefaultKeyGetter(c *gin.Context) string {\n\treturn c.ClientIP()\n}", "func (c *WorkflowNodeContext) DefaultPayloadToMap() (map[string]string, error) {\n\tif c == nil {\n\t\treturn nil, fmt.Errorf(\"Workflow node context is nil\")\n\t}\n\tif c.DefaultPayload == nil {\n\t\treturn map[string]string{}, nil\n\t}\n\tdumper := dump.NewDefaultEncoder()\n\tdumper.ExtraFields.DetailedMap = false\n\tdumper.ExtraFields.DetailedStruct = false\n\tdumper.ExtraFields.Len = false\n\tdumper.ExtraFields.Type = false\n\treturn dumper.ToStringMap(c.DefaultPayload)\n}", "func IsDefaultKey(key string) bool {\n\tfor _, k := range defaultKeys {\n\t\tif k == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFileSystem invokes the dfs.CreateFileSystem API synchronously
func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) {
	response = CreateCreateFileSystemResponse()
	err = client.DoAction(request, response)
	return
}
[ "func (client StorageGatewayClient) CreateFileSystem(ctx context.Context, request CreateFileSystemRequest) (response CreateFileSystemResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createFileSystem, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateFileSystemResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateFileSystemResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateFileSystemResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateFileSystemResponse\")\n\t}\n\treturn\n}", "func (z *zfsctl) CreateFileSystem(ctx context.Context, name string, properties map[string]string) *execute {\n\targs := []string{\"create\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, details filestorage.CreateFileSystemDetails) (*filestorage.FileSystem, error) {\n\treturn &filestorage.FileSystem{Id: &fileSystemID}, nil\n}", "func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}", "func (client *Client) CreateFileSystemWithCallback(request *CreateFileSystemRequest, callback func(response *CreateFileSystemResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() 
{\n\t\tvar response *CreateFileSystemResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateFileSystem(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client *Client) CreateFileSystemWithChan(request *CreateFileSystemRequest) (<-chan *CreateFileSystemResponse, <-chan error) {\n\tresponseChan := make(chan *CreateFileSystemResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CreateFileSystem(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) {\n\trequest = &CreateFileSystemRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"CreateFileSystem\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {\n\targs := make([]string, 1, 4)\n\targs[0] = \"create\"\n\n\tif properties != nil {\n\t\targs = append(args, propsSlice(properties)...)\n\t}\n\n\targs = append(args, name)\n\t_, err := zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn GetDataset(name)\n}", "func (f *FilesystemOperation) Create(name, namespace string) error {\n\tlogger.Infof(\"creating the filesystem via CRD\")\n\tif _, err := f.k8sh.ResourceOperation(\"create\", f.manifests.GetFilesystem(namespace, name, 2)); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(\"Make sure rook-ceph-mds pod is running\")\n\terr := f.k8sh.WaitForLabeledPodsToRun(fmt.Sprintf(\"rook_file_system=%s\", name), namespace)\n\tassert.Nil(f.k8sh.T(), err)\n\n\tassert.True(f.k8sh.T(), f.k8sh.CheckPodCountAndState(\"rook-ceph-mds\", namespace, 4, \"Running\"),\n\t\t\"Make sure there are four rook-ceph-mds pods present in Running state\")\n\n\treturn nil\n}", "func (*FileSystemBase) Create(path string, flags int, mode uint32) (int, uint64) {\n\treturn -ENOSYS, ^uint64(0)\n}", "func CreateFilesystemOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *FilesystemOperation {\n\treturn &FilesystemOperation{k8sh, manifests}\n}", "func (z *ZfsH) CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {\n\targs := make([]string, 1, 4)\n\targs[0] = \"create\"\n\n\tif properties != nil {\n\t\targs = append(args, propsSlice(properties)...)\n\t}\n\n\targs = append(args, name)\n\t_, err := z.zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn z.GetDataset(name)\n}", "func (s 
*storageModule) CreateFilesystem(name string, size uint64, poolType pkg.DeviceType) (string, error) {\n\tlog.Info().Msgf(\"Creating new volume with size %d\", size)\n\tif strings.HasPrefix(name, \"zdb\") {\n\t\treturn \"\", fmt.Errorf(\"invalid volume name. zdb prefix is reserved\")\n\t}\n\n\tfs, err := s.createSubvol(size, name, poolType)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fs.Path(), nil\n}", "func MakeFsOnDisk() FileSystem { return filesys.MakeFsOnDisk() }", "func (c *Client) CreateFS(args *CreateFSArgs) (*CreateFSResult, error) {\n\tif args == nil || len(args.Name) == 0 {\n\t\treturn nil, fmt.Errorf(\"unset fs name\")\n\t}\n\n\tif len(args.Zone) == 0 {\n\t\treturn nil, fmt.Errorf(\"unset zone\")\n\t}\n\n\tresult := &CreateFSResult{}\n\terr := bce.NewRequestBuilder(c).\n\t\tWithMethod(http.POST).\n\t\tWithURL(getCFSUri()).\n\t\tWithQueryParamFilter(\"clientToken\", args.ClientToken).\n\t\tWithBody(args).\n\t\tWithResult(result).\n\t\tDo()\n\n\treturn result, err\n}", "func (p *StoragePlan) InitFileSystem(client server.MetaClient) error {\n\tvar err error\n\tif err = os.RemoveAll(p.DatabasePath); err != nil {\n\t\treturn err\n\t}\n\n\tminT, maxT := p.TimeRange()\n\n\tgroups, err := client.NodeShardGroupsByTimeRange(p.Database, p.Retention, minT, maxT)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.groups = groups\n\n\tfor i := 0; i < len(groups); i++ {\n\t\tsgi := &groups[i]\n\t\tif len(sgi.Shards) > 1 {\n\t\t\treturn fmt.Errorf(\"multiple shards for the same owner %v\", sgi.Shards[0].Owners)\n\t\t}\n\n\t\tif err = os.MkdirAll(filepath.Join(p.ShardPath(), strconv.Itoa(int(sgi.Shards[0].ID))), 0777); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.info = client.Database(p.Database)\n\n\treturn nil\n}", "func Create(fsys fs.FS, name string) (WriterFile, error) {\n\tcfs, ok := fsys.(CreateFS)\n\tif !ok {\n\t\treturn nil, &fs.PathError{Op: \"create\", Path: name, Err: fmt.Errorf(\"not implemented on type %T\", fsys)}\n\t}\n\treturn cfs.Create(name)\n}", "func newFileSystem(basedir string, mkdir osMkdirAll) (*FS, error) {\n\tif err := mkdir(basedir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FS{basedir: basedir}, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFileSystemWithChan invokes the dfs.CreateFileSystem API asynchronously
func (client *Client) CreateFileSystemWithChan(request *CreateFileSystemRequest) (<-chan *CreateFileSystemResponse, <-chan error) {
	responseChan := make(chan *CreateFileSystemResponse, 1)
	errChan := make(chan error, 1)
	err := client.AddAsyncTask(func() {
		defer close(responseChan)
		defer close(errChan)
		response, err := client.CreateFileSystem(request)
		if err != nil {
			errChan <- err
		} else {
			responseChan <- response
		}
	})
	if err != nil {
		errChan <- err
		close(responseChan)
		close(errChan)
	}
	return responseChan, errChan
}
[ "func (client *Client) CreateFileSystemWithCallback(request *CreateFileSystemRequest, callback func(response *CreateFileSystemResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateFileSystemResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateFileSystem(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (z *zfsctl) CreateFileSystem(ctx context.Context, name string, properties map[string]string) *execute {\n\targs := []string{\"create\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) {\n\tresponse = CreateCreateFileSystemResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (client *Client) ListFileSystemsWithChan(request *ListFileSystemsRequest) (<-chan *ListFileSystemsResponse, <-chan error) {\n\tresponseChan := make(chan *ListFileSystemsResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.ListFileSystems(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}", "func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, details filestorage.CreateFileSystemDetails) (*filestorage.FileSystem, error) {\n\treturn &filestorage.FileSystem{Id: &fileSystemID}, nil\n}", "func (client StorageGatewayClient) CreateFileSystem(ctx context.Context, request 
CreateFileSystemRequest) (response CreateFileSystemResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createFileSystem, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateFileSystemResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateFileSystemResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateFileSystemResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateFileSystemResponse\")\n\t}\n\treturn\n}", "func (client *Client) CreateFileDetectWithChan(request *CreateFileDetectRequest) (<-chan *CreateFileDetectResponse, <-chan error) {\n\tresponseChan := make(chan *CreateFileDetectResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CreateFileDetect(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func MakeFsOnDisk() FileSystem { return filesys.MakeFsOnDisk() }", "func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) {\n\trequest = &CreateFileSystemRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"CreateFileSystem\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func newFileSystem(basedir string, mkdir osMkdirAll) (*FS, error) {\n\tif err := mkdir(basedir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FS{basedir: basedir}, nil\n}", "func NewFileSystemClient() (FileSystemClient, error) {\n\taddress := os.Getenv(constants.EnvFileSystemAddress)\n\tif address == \"\" {\n\t\treturn nil, fmt.Errorf(\"Environment variable '%s' not set\", constants.EnvFileSystemAddress)\n\t}\n\n\t// Create a connection\n\tconn, err := grpc.Dial(address, grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a client\n\tc := fs.NewFileSystemClient(conn)\n\n\treturn &fileSystemClient{c: c, conn: conn}, nil\n}", "func NewFileSystem(addr string, opts ...grpc.DialOption) (*FileSystem, error) {\n\tvar conn 
grpc.ClientConnInterface\n\tvar err error\n\n\tif len(opts) == 0 {\n\t\t// if no options are provided, default to insecure connection\n\t\tconn, err = grpc.Dial(addr, grpc.WithInsecure())\n\t} else {\n\t\t// use the options provided by the caller\n\t\tconn, err = grpc.Dial(addr, opts...)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfs := &FileSystem{\n\t\tclient: proto.NewFileSystemClient(conn),\n\t\t// TODO(student): initialize additional fields here (if any)\n\t}\n\n\treturn fs, nil\n}", "func (*FileSystemBase) Create(path string, flags int, mode uint32) (int, uint64) {\n\treturn -ENOSYS, ^uint64(0)\n}", "func (c *Client) CreateFS(args *CreateFSArgs) (*CreateFSResult, error) {\n\tif args == nil || len(args.Name) == 0 {\n\t\treturn nil, fmt.Errorf(\"unset fs name\")\n\t}\n\n\tif len(args.Zone) == 0 {\n\t\treturn nil, fmt.Errorf(\"unset zone\")\n\t}\n\n\tresult := &CreateFSResult{}\n\terr := bce.NewRequestBuilder(c).\n\t\tWithMethod(http.POST).\n\t\tWithURL(getCFSUri()).\n\t\tWithQueryParamFilter(\"clientToken\", args.ClientToken).\n\t\tWithBody(args).\n\t\tWithResult(result).\n\t\tDo()\n\n\treturn result, err\n}", "func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {\n\targs := make([]string, 1, 4)\n\targs[0] = \"create\"\n\n\tif properties != nil {\n\t\targs = append(args, propsSlice(properties)...)\n\t}\n\n\targs = append(args, name)\n\t_, err := zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn GetDataset(name)\n}", "func (client *Client) ListFileSystemsWithCallback(request *ListFileSystemsRequest, callback func(response *ListFileSystemsResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListFileSystemsResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListFileSystems(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client *Client) CreateFileDetectWithCallback(request *CreateFileDetectRequest, callback func(response *CreateFileDetectResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateFileDetectResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateFileDetect(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateFileSystemWithCallback invokes the dfs.CreateFileSystem API asynchronously
func (client *Client) CreateFileSystemWithCallback(request *CreateFileSystemRequest, callback func(response *CreateFileSystemResponse, err error)) <-chan int {
	result := make(chan int, 1)
	err := client.AddAsyncTask(func() {
		var response *CreateFileSystemResponse
		var err error
		defer close(result)
		response, err = client.CreateFileSystem(request)
		callback(response, err)
		result <- 1
	})
	if err != nil {
		defer close(result)
		callback(nil, err)
		result <- 0
	}
	return result
}
[ "func (client *Client) CreateFileSystemWithChan(request *CreateFileSystemRequest) (<-chan *CreateFileSystemResponse, <-chan error) {\n\tresponseChan := make(chan *CreateFileSystemResponse, 1)\n\terrChan := make(chan error, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tdefer close(responseChan)\n\t\tdefer close(errChan)\n\t\tresponse, err := client.CreateFileSystem(request)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t} else {\n\t\t\tresponseChan <- response\n\t\t}\n\t})\n\tif err != nil {\n\t\terrChan <- err\n\t\tclose(responseChan)\n\t\tclose(errChan)\n\t}\n\treturn responseChan, errChan\n}", "func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, details filestorage.CreateFileSystemDetails) (*filestorage.FileSystem, error) {\n\treturn &filestorage.FileSystem{Id: &fileSystemID}, nil\n}", "func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) {\n\tresponse = CreateCreateFileSystemResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func (client StorageGatewayClient) CreateFileSystem(ctx context.Context, request CreateFileSystemRequest) (response CreateFileSystemResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createFileSystem, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateFileSystemResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateFileSystemResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateFileSystemResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateFileSystemResponse\")\n\t}\n\treturn\n}", "func (z *zfsctl) CreateFileSystem(ctx context.Context, name string, properties map[string]string) *execute {\n\targs := []string{\"create\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", 
debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}", "func (client *Client) ListFileSystemsWithCallback(request *ListFileSystemsRequest, callback func(response *ListFileSystemsResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *ListFileSystemsResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.ListFileSystems(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil {\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) {\n\trequest = &CreateFileSystemRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"CreateFileSystem\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func newFileSystem(basedir string, mkdir osMkdirAll) (*FS, error) {\n\tif err := mkdir(basedir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FS{basedir: basedir}, nil\n}", "func (*FileSystemBase) Create(path string, flags int, mode uint32) (int, uint64) {\n\treturn -ENOSYS, ^uint64(0)\n}", "func CreateFilesystemOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *FilesystemOperation {\n\treturn &FilesystemOperation{k8sh, manifests}\n}", "func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {\n\targs := make([]string, 1, 4)\n\targs[0] = \"create\"\n\n\tif properties != nil {\n\t\targs = append(args, propsSlice(properties)...)\n\t}\n\n\targs = append(args, name)\n\t_, err := zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn GetDataset(name)\n}", "func GenerateFileSystem(resp *svcsdk.DescribeFileSystemsOutput) *svcapitypes.FileSystem {\n\tcr := &svcapitypes.FileSystem{}\n\n\tfound := false\n\tfor _, elem := range resp.FileSystems {\n\t\tif elem.AvailabilityZoneId != nil {\n\t\t\tcr.Status.AtProvider.AvailabilityZoneID = elem.AvailabilityZoneId\n\t\t} else {\n\t\t\tcr.Status.AtProvider.AvailabilityZoneID = nil\n\t\t}\n\t\tif elem.AvailabilityZoneName != nil {\n\t\t\tcr.Spec.ForProvider.AvailabilityZoneName = elem.AvailabilityZoneName\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.AvailabilityZoneName = nil\n\t\t}\n\t\tif elem.CreationTime != nil 
{\n\t\t\tcr.Status.AtProvider.CreationTime = &metav1.Time{*elem.CreationTime}\n\t\t} else {\n\t\t\tcr.Status.AtProvider.CreationTime = nil\n\t\t}\n\t\tif elem.CreationToken != nil {\n\t\t\tcr.Status.AtProvider.CreationToken = elem.CreationToken\n\t\t} else {\n\t\t\tcr.Status.AtProvider.CreationToken = nil\n\t\t}\n\t\tif elem.Encrypted != nil {\n\t\t\tcr.Spec.ForProvider.Encrypted = elem.Encrypted\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.Encrypted = nil\n\t\t}\n\t\tif elem.FileSystemArn != nil {\n\t\t\tcr.Status.AtProvider.FileSystemARN = elem.FileSystemArn\n\t\t} else {\n\t\t\tcr.Status.AtProvider.FileSystemARN = nil\n\t\t}\n\t\tif elem.FileSystemId != nil {\n\t\t\tcr.Status.AtProvider.FileSystemID = elem.FileSystemId\n\t\t} else {\n\t\t\tcr.Status.AtProvider.FileSystemID = nil\n\t\t}\n\t\tif elem.KmsKeyId != nil {\n\t\t\tcr.Spec.ForProvider.KMSKeyID = elem.KmsKeyId\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.KMSKeyID = nil\n\t\t}\n\t\tif elem.LifeCycleState != nil {\n\t\t\tcr.Status.AtProvider.LifeCycleState = elem.LifeCycleState\n\t\t} else {\n\t\t\tcr.Status.AtProvider.LifeCycleState = nil\n\t\t}\n\t\tif elem.Name != nil {\n\t\t\tcr.Status.AtProvider.Name = elem.Name\n\t\t} else {\n\t\t\tcr.Status.AtProvider.Name = nil\n\t\t}\n\t\tif elem.NumberOfMountTargets != nil {\n\t\t\tcr.Status.AtProvider.NumberOfMountTargets = elem.NumberOfMountTargets\n\t\t} else {\n\t\t\tcr.Status.AtProvider.NumberOfMountTargets = nil\n\t\t}\n\t\tif elem.OwnerId != nil {\n\t\t\tcr.Status.AtProvider.OwnerID = elem.OwnerId\n\t\t} else {\n\t\t\tcr.Status.AtProvider.OwnerID = nil\n\t\t}\n\t\tif elem.PerformanceMode != nil {\n\t\t\tcr.Spec.ForProvider.PerformanceMode = elem.PerformanceMode\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.PerformanceMode = nil\n\t\t}\n\t\tif elem.SizeInBytes != nil {\n\t\t\tf13 := &svcapitypes.FileSystemSize{}\n\t\t\tif elem.SizeInBytes.Timestamp != nil {\n\t\t\t\tf13.Timestamp = &metav1.Time{*elem.SizeInBytes.Timestamp}\n\t\t\t}\n\t\t\tif elem.SizeInBytes.Value != nil {\n\t\t\t\tf13.Value = elem.SizeInBytes.Value\n\t\t\t}\n\t\t\tif elem.SizeInBytes.ValueInIA != nil {\n\t\t\t\tf13.ValueInIA = elem.SizeInBytes.ValueInIA\n\t\t\t}\n\t\t\tif elem.SizeInBytes.ValueInStandard != nil {\n\t\t\t\tf13.ValueInStandard = elem.SizeInBytes.ValueInStandard\n\t\t\t}\n\t\t\tcr.Status.AtProvider.SizeInBytes = f13\n\t\t} else {\n\t\t\tcr.Status.AtProvider.SizeInBytes = nil\n\t\t}\n\t\tif elem.Tags != nil {\n\t\t\tf14 := []*svcapitypes.Tag{}\n\t\t\tfor _, f14iter := range elem.Tags {\n\t\t\t\tf14elem := &svcapitypes.Tag{}\n\t\t\t\tif f14iter.Key != nil {\n\t\t\t\t\tf14elem.Key = f14iter.Key\n\t\t\t\t}\n\t\t\t\tif f14iter.Value != nil {\n\t\t\t\t\tf14elem.Value = f14iter.Value\n\t\t\t\t}\n\t\t\t\tf14 = append(f14, f14elem)\n\t\t\t}\n\t\t\tcr.Spec.ForProvider.Tags = f14\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.Tags = nil\n\t\t}\n\t\tif elem.ThroughputMode != nil {\n\t\t\tcr.Spec.ForProvider.ThroughputMode = elem.ThroughputMode\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.ThroughputMode = nil\n\t\t}\n\t\tfound = true\n\t\tbreak\n\t}\n\tif !found {\n\t\treturn cr\n\t}\n\n\treturn cr\n}", "func (client *Client) CreateFileDetectWithCallback(request *CreateFileDetectRequest, callback func(response *CreateFileDetectResponse, err error)) <-chan int {\n\tresult := make(chan int, 1)\n\terr := client.AddAsyncTask(func() {\n\t\tvar response *CreateFileDetectResponse\n\t\tvar err error\n\t\tdefer close(result)\n\t\tresponse, err = client.CreateFileDetect(request)\n\t\tcallback(response, err)\n\t\tresult <- 1\n\t})\n\tif err != nil 
{\n\t\tdefer close(result)\n\t\tcallback(nil, err)\n\t\tresult <- 0\n\t}\n\treturn result\n}", "func CreateCreateFileSystemResponse() (response *CreateFileSystemResponse) {\n\tresponse = &CreateFileSystemResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func MakeFsOnDisk() FileSystem { return filesys.MakeFsOnDisk() }", "func NewFileSystem(token string, debug bool) *FileSystem {\n\toauthClient := oauth2.NewClient(\n\t\toauth2.NoContext,\n\t\toauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}),\n\t)\n\tclient := putio.NewClient(oauthClient)\n\tclient.UserAgent = defaultUserAgent\n\n\treturn &FileSystem{\n\t\tputio: client,\n\t\tlogger: NewLogger(\"putiofs: \", debug),\n\t}\n}", "func (fs ReverseHttpFs) Create(n string) (afero.File, error) {\n\treturn nil, syscall.EPERM\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateCreateFileSystemRequest creates a request to invoke CreateFileSystem API
func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) {
	request = &CreateFileSystemRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	request.InitWithApiInfo("DFS", "2018-06-20", "CreateFileSystem", "alidfs", "openAPI")
	request.Method = requests.POST
	return
}
[ "func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) {\n\tresponse = CreateCreateFileSystemResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateListFileSystemsRequest() (request *ListFileSystemsRequest) {\n\trequest = &ListFileSystemsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"ListFileSystems\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (z *zfsctl) CreateFileSystem(ctx context.Context, name string, properties map[string]string) *execute {\n\targs := []string{\"create\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client StorageGatewayClient) CreateFileSystem(ctx context.Context, request CreateFileSystemRequest) (response CreateFileSystemResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createFileSystem, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateFileSystemResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateFileSystemResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateFileSystemResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateFileSystemResponse\")\n\t}\n\treturn\n}", "func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, details filestorage.CreateFileSystemDetails) (*filestorage.FileSystem, error) {\n\treturn &filestorage.FileSystem{Id: &fileSystemID}, nil\n}", "func GenerateCreateFileSystemInput(cr *svcapitypes.FileSystem) *svcsdk.CreateFileSystemInput {\n\tres := &svcsdk.CreateFileSystemInput{}\n\n\tif cr.Spec.ForProvider.AvailabilityZoneName != nil {\n\t\tres.SetAvailabilityZoneName(*cr.Spec.ForProvider.AvailabilityZoneName)\n\t}\n\tif cr.Spec.ForProvider.Backup != nil {\n\t\tres.SetBackup(*cr.Spec.ForProvider.Backup)\n\t}\n\tif cr.Spec.ForProvider.Encrypted != nil {\n\t\tres.SetEncrypted(*cr.Spec.ForProvider.Encrypted)\n\t}\n\tif cr.Spec.ForProvider.KMSKeyID != nil 
{\n\t\tres.SetKmsKeyId(*cr.Spec.ForProvider.KMSKeyID)\n\t}\n\tif cr.Spec.ForProvider.PerformanceMode != nil {\n\t\tres.SetPerformanceMode(*cr.Spec.ForProvider.PerformanceMode)\n\t}\n\tif cr.Spec.ForProvider.Tags != nil {\n\t\tf5 := []*svcsdk.Tag{}\n\t\tfor _, f5iter := range cr.Spec.ForProvider.Tags {\n\t\t\tf5elem := &svcsdk.Tag{}\n\t\t\tif f5iter.Key != nil {\n\t\t\t\tf5elem.SetKey(*f5iter.Key)\n\t\t\t}\n\t\t\tif f5iter.Value != nil {\n\t\t\t\tf5elem.SetValue(*f5iter.Value)\n\t\t\t}\n\t\t\tf5 = append(f5, f5elem)\n\t\t}\n\t\tres.SetTags(f5)\n\t}\n\tif cr.Spec.ForProvider.ThroughputMode != nil {\n\t\tres.SetThroughputMode(*cr.Spec.ForProvider.ThroughputMode)\n\t}\n\n\treturn res\n}", "func CreateCreateFileDetectRequest() (request *CreateFileDetectRequest) {\n\trequest = &CreateFileDetectRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"CreateFileDetect\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateListAvailableFileSystemTypesRequest() (request *ListAvailableFileSystemTypesRequest) {\n\trequest = &ListAvailableFileSystemTypesRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"EHPC\", \"2018-04-12\", \"ListAvailableFileSystemTypes\", \"ehs\", \"openAPI\")\n\treturn\n}", "func CreateFile(w http.ResponseWriter, r *http.Request) {\n\tvar body datastructures.CreateBody\n\n\tif reqBody, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Could not read request body\"))\n\t\treturn\n\t} else if err = json.Unmarshal(reqBody, &body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(\"Bad request\"))\n\t\treturn\n\t}\n\n\tlog.Println(\n\t\t\"Create file request for\",\n\t\t\"workspace\", body.Workspace.ToString(),\n\t\t\"path\", filepath.Join(body.File.Path...),\n\t)\n\n\tif err := utils.CreateFile(body.Workspace, body.File); err != nil {\n\t\tlog.Println(err)\n\t}\n}", "func CreateFilesystemOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *FilesystemOperation {\n\treturn &FilesystemOperation{k8sh, manifests}\n}", "func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil 
{\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}", "func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {\n\targs := make([]string, 1, 4)\n\targs[0] = \"create\"\n\n\tif properties != nil {\n\t\targs = append(args, propsSlice(properties)...)\n\t}\n\n\targs = append(args, name)\n\t_, err := zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn GetDataset(name)\n}", "func (c *Client) CreateFS(args *CreateFSArgs) (*CreateFSResult, error) {\n\tif args == nil || len(args.Name) == 0 {\n\t\treturn nil, fmt.Errorf(\"unset fs name\")\n\t}\n\n\tif len(args.Zone) == 0 {\n\t\treturn nil, fmt.Errorf(\"unset zone\")\n\t}\n\n\tresult := &CreateFSResult{}\n\terr := bce.NewRequestBuilder(c).\n\t\tWithMethod(http.POST).\n\t\tWithURL(getCFSUri()).\n\t\tWithQueryParamFilter(\"clientToken\", args.ClientToken).\n\t\tWithBody(args).\n\t\tWithResult(result).\n\t\tDo()\n\n\treturn result, err\n}", "func CreateCreateFileSystemResponse() (response *CreateFileSystemResponse) {\n\tresponse = &CreateFileSystemResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateCreateFaceConfigRequest() (request *CreateFaceConfigRequest) {\n\trequest = &CreateFaceConfigRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cloudauth\", \"2019-03-07\", \"CreateFaceConfig\", \"cloudauth\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateRenameDbfsRequest() (request *RenameDbfsRequest) {\n\trequest = &RenameDbfsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DBFS\", \"2020-04-18\", \"RenameDbfs\", \"dbfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateCreateClusterRequest() (request *CreateClusterRequest) {\n\trequest = &CreateClusterRequest{\n\t\tRoaRequest: &requests.RoaRequest{},\n\t}\n\trequest.InitWithApiInfo(\"CS\", \"2015-12-15\", \"CreateCluster\", \"/clusters\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (r CreateFileSystemRequest) Send(ctx context.Context) (*CreateFileSystemResponse, error) {\n\tr.Request.SetContext(ctx)\n\terr := r.Request.Send()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := &CreateFileSystemResponse{\n\t\tCreateFileSystemOutput: r.Request.Data.(*CreateFileSystemOutput),\n\t\tresponse: &aws.Response{Request: r.Request},\n\t}\n\n\treturn resp, nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CreateCreateFileSystemResponse creates a response to parse from CreateFileSystem response
func CreateCreateFileSystemResponse() (response *CreateFileSystemResponse) {
	response = &CreateFileSystemResponse{
		BaseResponse: &responses.BaseResponse{},
	}
	return
}
[ "func CreateListFileSystemsResponse() (response *ListFileSystemsResponse) {\n\tresponse = &ListFileSystemsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (client StorageGatewayClient) createFileSystem(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/storageGateways/{storageGatewayId}/fileSystems\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateFileSystemResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (client StorageGatewayClient) CreateFileSystem(ctx context.Context, request CreateFileSystemRequest) (response CreateFileSystemResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\n\tif !(request.OpcRetryToken != nil && *request.OpcRetryToken != \"\") {\n\t\trequest.OpcRetryToken = common.String(common.RetryToken())\n\t}\n\n\tociResponse, err = common.Retry(ctx, request, client.createFileSystem, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = CreateFileSystemResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = CreateFileSystemResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(CreateFileSystemResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into CreateFileSystemResponse\")\n\t}\n\treturn\n}", "func (client *Client) CreateFileSystem(request *CreateFileSystemRequest) (response *CreateFileSystemResponse, err error) {\n\tresponse = CreateCreateFileSystemResponse()\n\terr = client.DoAction(request, response)\n\treturn\n}", "func CreateCreateFileSystemRequest() (request *CreateFileSystemRequest) {\n\trequest = &CreateFileSystemRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"DFS\", \"2018-06-20\", \"CreateFileSystem\", \"alidfs\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func CreateCreateFileDetectResponse() (response *CreateFileDetectResponse) {\n\tresponse = &CreateFileDetectResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateRenameDbfsResponse() (response *RenameDbfsResponse) {\n\tresponse = &RenameDbfsResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateListAvailableFileSystemTypesResponse() (response *ListAvailableFileSystemTypesResponse) {\n\tresponse = &ListAvailableFileSystemTypesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func (z *zfsctl) CreateFileSystem(ctx context.Context, name string, properties map[string]string) *execute {\n\targs := []string{\"create\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, name)\n\treturn &execute{ctx: ctx, name: 
z.cmd, args: args}\n}", "func (f *Fs) createFile(ctx context.Context, pathID, leaf, mimeType string) (newID string, err error) {\n\tvar resp *http.Response\n\topts := rest.Opts{\n\t\tMethod: \"POST\",\n\t\tRootURL: pathID,\n\t\tNoResponse: true,\n\t}\n\tmkdir := api.CreateFile{\n\t\tName: f.opt.Enc.FromStandardName(leaf),\n\t\tMediaType: mimeType,\n\t}\n\terr = f.pacer.Call(func() (bool, error) {\n\t\tresp, err = f.srv.CallXML(ctx, &opts, &mkdir, nil)\n\t\treturn shouldRetry(ctx, resp, err)\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.Header.Get(\"Location\"), nil\n}", "func GenerateFileSystem(resp *svcsdk.DescribeFileSystemsOutput) *svcapitypes.FileSystem {\n\tcr := &svcapitypes.FileSystem{}\n\n\tfound := false\n\tfor _, elem := range resp.FileSystems {\n\t\tif elem.AvailabilityZoneId != nil {\n\t\t\tcr.Status.AtProvider.AvailabilityZoneID = elem.AvailabilityZoneId\n\t\t} else {\n\t\t\tcr.Status.AtProvider.AvailabilityZoneID = nil\n\t\t}\n\t\tif elem.AvailabilityZoneName != nil {\n\t\t\tcr.Spec.ForProvider.AvailabilityZoneName = elem.AvailabilityZoneName\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.AvailabilityZoneName = nil\n\t\t}\n\t\tif elem.CreationTime != nil {\n\t\t\tcr.Status.AtProvider.CreationTime = &metav1.Time{*elem.CreationTime}\n\t\t} else {\n\t\t\tcr.Status.AtProvider.CreationTime = nil\n\t\t}\n\t\tif elem.CreationToken != nil {\n\t\t\tcr.Status.AtProvider.CreationToken = elem.CreationToken\n\t\t} else {\n\t\t\tcr.Status.AtProvider.CreationToken = nil\n\t\t}\n\t\tif elem.Encrypted != nil {\n\t\t\tcr.Spec.ForProvider.Encrypted = elem.Encrypted\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.Encrypted = nil\n\t\t}\n\t\tif elem.FileSystemArn != nil {\n\t\t\tcr.Status.AtProvider.FileSystemARN = elem.FileSystemArn\n\t\t} else {\n\t\t\tcr.Status.AtProvider.FileSystemARN = nil\n\t\t}\n\t\tif elem.FileSystemId != nil {\n\t\t\tcr.Status.AtProvider.FileSystemID = elem.FileSystemId\n\t\t} else {\n\t\t\tcr.Status.AtProvider.FileSystemID = nil\n\t\t}\n\t\tif elem.KmsKeyId != nil {\n\t\t\tcr.Spec.ForProvider.KMSKeyID = elem.KmsKeyId\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.KMSKeyID = nil\n\t\t}\n\t\tif elem.LifeCycleState != nil {\n\t\t\tcr.Status.AtProvider.LifeCycleState = elem.LifeCycleState\n\t\t} else {\n\t\t\tcr.Status.AtProvider.LifeCycleState = nil\n\t\t}\n\t\tif elem.Name != nil {\n\t\t\tcr.Status.AtProvider.Name = elem.Name\n\t\t} else {\n\t\t\tcr.Status.AtProvider.Name = nil\n\t\t}\n\t\tif elem.NumberOfMountTargets != nil {\n\t\t\tcr.Status.AtProvider.NumberOfMountTargets = elem.NumberOfMountTargets\n\t\t} else {\n\t\t\tcr.Status.AtProvider.NumberOfMountTargets = nil\n\t\t}\n\t\tif elem.OwnerId != nil {\n\t\t\tcr.Status.AtProvider.OwnerID = elem.OwnerId\n\t\t} else {\n\t\t\tcr.Status.AtProvider.OwnerID = nil\n\t\t}\n\t\tif elem.PerformanceMode != nil {\n\t\t\tcr.Spec.ForProvider.PerformanceMode = elem.PerformanceMode\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.PerformanceMode = nil\n\t\t}\n\t\tif elem.SizeInBytes != nil {\n\t\t\tf13 := &svcapitypes.FileSystemSize{}\n\t\t\tif elem.SizeInBytes.Timestamp != nil {\n\t\t\t\tf13.Timestamp = &metav1.Time{*elem.SizeInBytes.Timestamp}\n\t\t\t}\n\t\t\tif elem.SizeInBytes.Value != nil {\n\t\t\t\tf13.Value = elem.SizeInBytes.Value\n\t\t\t}\n\t\t\tif elem.SizeInBytes.ValueInIA != nil {\n\t\t\t\tf13.ValueInIA = elem.SizeInBytes.ValueInIA\n\t\t\t}\n\t\t\tif elem.SizeInBytes.ValueInStandard != nil {\n\t\t\t\tf13.ValueInStandard = elem.SizeInBytes.ValueInStandard\n\t\t\t}\n\t\t\tcr.Status.AtProvider.SizeInBytes = f13\n\t\t} else 
{\n\t\t\tcr.Status.AtProvider.SizeInBytes = nil\n\t\t}\n\t\tif elem.Tags != nil {\n\t\t\tf14 := []*svcapitypes.Tag{}\n\t\t\tfor _, f14iter := range elem.Tags {\n\t\t\t\tf14elem := &svcapitypes.Tag{}\n\t\t\t\tif f14iter.Key != nil {\n\t\t\t\t\tf14elem.Key = f14iter.Key\n\t\t\t\t}\n\t\t\t\tif f14iter.Value != nil {\n\t\t\t\t\tf14elem.Value = f14iter.Value\n\t\t\t\t}\n\t\t\t\tf14 = append(f14, f14elem)\n\t\t\t}\n\t\t\tcr.Spec.ForProvider.Tags = f14\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.Tags = nil\n\t\t}\n\t\tif elem.ThroughputMode != nil {\n\t\t\tcr.Spec.ForProvider.ThroughputMode = elem.ThroughputMode\n\t\t} else {\n\t\t\tcr.Spec.ForProvider.ThroughputMode = nil\n\t\t}\n\t\tfound = true\n\t\tbreak\n\t}\n\tif !found {\n\t\treturn cr\n\t}\n\n\treturn cr\n}", "func CreateCreateGatewayFileShareResponse() (response *CreateGatewayFileShareResponse) {\n\tresponse = &CreateGatewayFileShareResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}", "func CreateFile(w http.ResponseWriter, r *http.Request) {\n\tvar body datastructures.CreateBody\n\n\tif reqBody, err := ioutil.ReadAll(r.Body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Could not read request body\"))\n\t\treturn\n\t} else if err = json.Unmarshal(reqBody, &body); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(400)\n\t\tw.Write([]byte(\"Bad request\"))\n\t\treturn\n\t}\n\n\tlog.Println(\n\t\t\"Create file request for\",\n\t\t\"workspace\", body.Workspace.ToString(),\n\t\t\"path\", filepath.Join(body.File.Path...),\n\t)\n\n\tif err := utils.CreateFile(body.Workspace, body.File); err != nil {\n\t\tlog.Println(err)\n\t}\n}", "func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, details filestorage.CreateFileSystemDetails) (*filestorage.FileSystem, error) {\n\treturn &filestorage.FileSystem{Id: &fileSystemID}, nil\n}", "func (fs ReverseHttpFs) Create(n string) (afero.File, error) {\n\treturn nil, syscall.EPERM\n}", "func GenerateCreateFileSystemInput(cr *svcapitypes.FileSystem) *svcsdk.CreateFileSystemInput {\n\tres := &svcsdk.CreateFileSystemInput{}\n\n\tif cr.Spec.ForProvider.AvailabilityZoneName != nil {\n\t\tres.SetAvailabilityZoneName(*cr.Spec.ForProvider.AvailabilityZoneName)\n\t}\n\tif cr.Spec.ForProvider.Backup != nil {\n\t\tres.SetBackup(*cr.Spec.ForProvider.Backup)\n\t}\n\tif cr.Spec.ForProvider.Encrypted != nil {\n\t\tres.SetEncrypted(*cr.Spec.ForProvider.Encrypted)\n\t}\n\tif cr.Spec.ForProvider.KMSKeyID != nil {\n\t\tres.SetKmsKeyId(*cr.Spec.ForProvider.KMSKeyID)\n\t}\n\tif cr.Spec.ForProvider.PerformanceMode != nil {\n\t\tres.SetPerformanceMode(*cr.Spec.ForProvider.PerformanceMode)\n\t}\n\tif cr.Spec.ForProvider.Tags != nil {\n\t\tf5 := []*svcsdk.Tag{}\n\t\tfor _, f5iter := range cr.Spec.ForProvider.Tags {\n\t\t\tf5elem := &svcsdk.Tag{}\n\t\t\tif f5iter.Key != nil {\n\t\t\t\tf5elem.SetKey(*f5iter.Key)\n\t\t\t}\n\t\t\tif f5iter.Value != nil {\n\t\t\t\tf5elem.SetValue(*f5iter.Value)\n\t\t\t}\n\t\t\tf5 = append(f5, f5elem)\n\t\t}\n\t\tres.SetTags(f5)\n\t}\n\tif cr.Spec.ForProvider.ThroughputMode != nil {\n\t\tres.SetThroughputMode(*cr.Spec.ForProvider.ThroughputMode)\n\t}\n\n\treturn res\n}", "func NewCreateResponse(input string) CreateResponse {\n\tvar id string\n\ttmp := strings.Split(input, \"/\")\n\tif len(tmp) > 0 {\n\t\tid = tmp[len(tmp)-1]\n\t}\n\treturn CreateResponse{\n\t\tID: id,\n\t\tBody: input,\n\t}\n}", "func (client FileClient) CreateResponder(resp *http.Response) (result FileUploadResponse, err error) {\n\terr = 
autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func CreateCreateEnvHsfTrafficControlResponse() (response *CreateEnvHsfTrafficControlResponse) {\n\tresponse = &CreateEnvHsfTrafficControlResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SaveBody creates or overwrites the existing response body file for the url associated with the given Fetcher
func (f Fetcher) SaveBody() {
	file, err := os.Create(f.url)
	check(err)
	defer file.Close()
	b := f.processBody()
	_, err = file.Write(b)
	check(err)
}
[ "func (r *Response) Save(fileName string) error {\r\n\treturn ioutil.WriteFile(fileName, r.Body, 0644)\r\n}", "func saveFile(savedPath string, res *http.Response) {\n\t// create a file of the given name and in the given path\n\tf, err := os.Create(savedPath)\n\terrCheck(err)\n\tio.Copy(f, res.Body)\n}", "func (p *Parrington) writeToBody() {\n\n\t// read file into `data`\n\tdata, err := ioutil.ReadFile(p.databasePath)\n\tcheck(err)\n\n\t// store inside local\n\t// parameter of self\n\tp.body = string(data)\n}", "func WritePayload(location string, resp *http.Response) {\n\tdst, err := os.Create(location)\n\tif err != nil{\n location = filepath.Join(\"/tmp\", location)\n _, _ = os.Create(location)\n\t}\n\tdefer dst.Close()\n\t_, _ = io.Copy(dst, resp.Body)\n os.Chmod(location, 0500)\n\n\n\n}", "func (resp *Response) SaveFile(filename string, perm os.FileMode) (err error) {\n\tvar file *os.File\n\tfile, err = os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err == nil {\n\t\tdefer file.Close()\n\t\terr = drainBody(resp.Body, file)\n\t}\n\treturn\n}", "func (p *Page) save() error {\n filename := p.Title + \".txt\"\n return ioutil.WriteFile(filename, p.Body, 0600)\n}", "func (s *Saver) SaveResponse(resp *http.Response) error {\n\trespName, err := GetResponseFilename(s.reqName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Create(filepath.Join(s.dir, respName))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer handleClose(&err, f)\n\n\treturn resp.Write(f)\n}", "func writeBody(fileName, line string) (string, error) {\n\t//make the temp directory if not exist\n\tdir, err := ioutil.TempDir(\"\", APP_SHORT_NAME)\n\tif err != nil {\n\t\treturn fileName, err\n\t}\n\t//write the file inside the HOME/directory\n\ttempGoFile := filepath.Join(dir, fileName) + EXT\n\treturn tempGoFile, ioutil.WriteFile(tempGoFile, []byte(line), 0644)\n}", "func SaveFileByFileInfo(fileInfo resource.IResource, resp *http.Response) resource.IResource {\n\tMakeDirAll(fileInfo.GetSaveParentPath())\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tfmt.Println(\"push body read error is \", err)\n\t\treturn nil\n\t}\n\tdataSize := len(body)\n\terr = ioutil.WriteFile(fileInfo.GetSavePath(), body, 0644)\n\tif err != nil {\n\t\tfmt.Println(\"push body write error is \", err)\n\t\treturn nil\n\t}\n\tfileInfo.SetDataSize(dataSize)\n\treturn fileInfo\n}", "func (s *Scraper) storeDownload(u *url.URL, buf *bytes.Buffer, fileExtension string) {\n\tisAPage := false\n\tif fileExtension == \"\" {\n\t\thtml, fixed, err := s.fixURLReferences(u, buf)\n\t\tif err != nil {\n\t\t\ts.logger.Error(\"Fixing file references failed\",\n\t\t\t\tlog.Stringer(\"url\", u),\n\t\t\t\tlog.Err(err))\n\t\t\treturn\n\t\t}\n\n\t\tif fixed {\n\t\t\tbuf = bytes.NewBufferString(html)\n\t\t}\n\t\tisAPage = true\n\t}\n\n\tfilePath := s.GetFilePath(u, isAPage)\n\t// always update html files, content might have changed\n\tif err := s.writeFile(filePath, buf); err != nil {\n\t\ts.logger.Error(\"Writing to file failed\",\n\t\t\tlog.Stringer(\"URL\", u),\n\t\t\tlog.String(\"file\", filePath),\n\t\t\tlog.Err(err))\n\t}\n}", "func (d *DBClient) save(req *http.Request, resp *http.Response, respBody []byte) {\n\t// record request here\n\tkey := getRequestFingerprint(req)\n\n\tif resp == nil {\n\t\tresp = emptyResp\n\t} else {\n\t\tresponseObj := response{\n\t\t\tStatus: resp.StatusCode,\n\t\t\tBody: respBody,\n\t\t\t//\t\t\tHeaders: getHeadersMap(resp.Header),\n\t\t\tHeaders: 
resp.Header,\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"path\": req.URL.Path,\n\t\t\t\"rawQuery\": req.URL.RawQuery,\n\t\t\t\"requestMethod\": req.Method,\n\t\t\t\"destination\": req.Host,\n\t\t\t\"hashKey\": key,\n\t\t}).Info(\"Recording\")\n\n\t\trequestObj := requestDetails{\n\t\t\tPath: req.URL.Path,\n\t\t\tMethod: req.Method,\n\t\t\tDestination: req.Host,\n\t\t\tQuery: req.URL.RawQuery,\n\t\t}\n\n\t\tpayload := Payload{\n\t\t\tResponse: responseObj,\n\t\t\tRequest: requestObj,\n\t\t\tID: key,\n\t\t}\n\t\t// converting it to json bytes\n\t\tbts, err := json.Marshal(payload)\n\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Error(\"Failed to marshal json\")\n\t\t} else {\n\t\t\td.cache.set(key, bts)\n\t\t}\n\t}\n\n}", "func (r *Recipe) Save(fname string) (err error) {\n\tb, err := json.MarshalIndent(r, \"\", \" \")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = ioutil.WriteFile(fname, b, 0644)\n\treturn\n}", "func fetchWriteCache(url string, body *string) <-chan struct{} {\n\tIOComplete := make(chan struct{})\n\tgo func() {\n\t\tfetchCache.WriteString(\"\", Id(NormalizeURL(url)), body, false)\n\t\tclose(IOComplete)\n\t}()\n\treturn IOComplete\n}", "func download(URL, savePath string) error {\n\tresp, err := http.Get(URL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Create the file\n\tfile, err := os.Create(savePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t// Write from the net to the file\n\t_, err = io.Copy(file, resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func saveToMemcache(r *http.Request, header http.Header, strBody string) {\n\tc := appengine.NewContext(r)\n\n\tmHeader := new(bytes.Buffer) //initialize a *bytes.Buffer\n\tencHeader := gob.NewEncoder(mHeader)\n\tencHeader.Encode(header)\n\n\tvar mapItem = map[string][]byte{}\n\tmapItem[\"header\"] = mHeader.Bytes()\n\tmapItem[\"body\"] = []byte(strBody)\n\n\tmFile := new(bytes.Buffer) //initialize a *bytes.Buffer\n\tencFile := gob.NewEncoder(mFile)\n\tencFile.Encode(mapItem)\n\n\titem := &memcache.Item{\n\t\tKey: \"II_file\" + r.URL.Path,\n\t\tValue: mFile.Bytes(),\n\t}\n\tmemcache.Add(c, item)\n}", "func (f *fetcher) Save(data []byte) {\n\t// Implementation can be application dependent\n\t// eg. you may implement connect to a redis server here\n\tfmt.Println(string(data))\n}", "func (dl *Download) Save(indexDir, namespace, name string, version semver.Version) (string, error) {\n\tfilename := filepath.Join(indexDir, namespace, name, version.String(), \"download\", dl.OS, dl.Arch)\n\n\tif err := os.MkdirAll(filepath.Dir(filename), 0700); err != nil {\n\t\treturn \"\", trace.Wrap(err, \"Creating download dir\")\n\t}\n\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn \"\", trace.Wrap(err, \"Failed opening file\")\n\t}\n\tdefer f.Close()\n\n\tencoder := json.NewEncoder(f)\n\tencoder.SetIndent(\"\", \" \")\n\n\treturn filename, encoder.Encode(dl)\n}", "func (f Fetcher) processBody() []byte {\n\tb, err := io.ReadAll(f.resp.Body)\n\tf.resp.Body.Close()\n\tif f.resp.StatusCode >= 300 {\n\t\tlog.Fatalf(\"Response failed with status code: %d\\n and body: %s\\n\", f.resp.StatusCode, b)\n\t}\n\tcheck(err)\n\treturn b\n}", "func (e *Entry) WriteBodyTo(w http.ResponseWriter) error {\n\t// the definition of private response seems come from\n\t// the package cacheobject\n\tif !e.isPublic {\n\t\treturn e.writePrivateResponse(w)\n\t}\n\n\treturn e.writePublicResponse(w)\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
processBody reads the response body associated with the given Fetcher and reports any errors
func (f Fetcher) processBody() []byte { b, err := io.ReadAll(f.resp.Body) f.resp.Body.Close() if f.resp.StatusCode >= 300 { log.Fatalf("Response failed with status code: %d\n and body: %s\n", f.resp.StatusCode, b) } check(err) return b }
[ "func postprocessJSONResponse(resp *http.Response, errCh chan error, proc func(body []byte) error) {\n\tbody, err := readBodyWithTimeout(resp.Body, responseBodyReadTimeout)\n\tif err != nil {\n\t\terrCh <- err\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\terrCh <- parseError(body)\n\t\treturn\n\t}\n\t// proc should perform a response specific processing; e.g. extracting specific fields. Only relevant if\n\t// if response code is 200.\n\tif err := proc(body); err != nil {\n\t\terrCh <- err\n\t}\n\terrCh <- nil\n}", "func DecodeBody(w http.ResponseWriter, r *http.Request, result interface{}) error {\n\terr := Decode(r.Body, result)\n\tif err != nil {\n\t\tWriteJSON(\n\t\t\tw, r,\n\t\t\thttperror.New(\n\t\t\t\thttp.StatusBadRequest,\n\t\t\t\thttperror.InvalidJSON,\n\t\t\t\t\"failed to decode '%T': %v\",\n\t\t\t\tresult, err.Error(),\n\t\t\t).WithCause(err))\n\t\treturn err\n\t}\n\treturn nil\n}", "func decodeBody(resp *http.Response, out interface{}) error {\n\tdec := json.NewDecoder(resp.Body)\n\treturn dec.Decode(out)\n}", "func decodeBody(resp *http.Response, out interface{}) error {\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Unmarshal the JSON body.\n\tif err = json.Unmarshal(body, &out); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func decodeBody(resp *http.Response, out interface{}) error {\n\tdefer resp.Body.Close()\n\tdec := json.NewDecoder(resp.Body)\n\treturn dec.Decode(out)\n}", "func ReadBody(resp *http.Response) ([]byte, error) {\n\tdefer resp.Body.Close()\n\n\tb, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\treturn b, StatusError{resp: resp}\n\t}\n\n\treturn b, nil\n}", "func parseBody(r io.Reader, obj interface{}) {\n\tbody, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tlog.Println(\"Error reading body\", err)\n\t}\n\n\terr = json.Unmarshal(body, obj)\n\tif err != nil {\n\t\tlog.Println(\"Error parsing body\", err)\n\t}\n}", "func (p *JSONEnvelope) process(miner *Miner, bodyContents []byte) (err error) {\r\n\t// Set the miner on the response\r\n\tp.Miner = miner\r\n\r\n\t// Unmarshal the response\r\n\tif err = json.Unmarshal(bodyContents, &p); err != nil {\r\n\t\treturn\r\n\t}\r\n\r\n\t// verify JSONEnvelope\r\n\tp.Validated, err = p.IsValid()\r\n\treturn\r\n}", "func ReadBody(resp *http.Response) (result []byte, err error) {\n\tdefer fs.CheckClose(resp.Body, &err)\n\treturn ioutil.ReadAll(resp.Body)\n}", "func (c *ClientWithResponses) ProcessEHRMessageWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*ProcessEHRMessageResponse, error) {\n\trsp, err := c.ProcessEHRMessageWithBody(ctx, contentType, body, reqEditors...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseProcessEHRMessageResponse(rsp)\n}", "func ParseBody(body io.Reader, out interface{}) error {\n\terr := json.NewDecoder(body).Decode(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func processBodyIfNecessary(req *http.Request) io.Reader {\n\tswitch req.Header.Get(\"Content-Encoding\") {\n\tdefault:\n\t\treturn req.Body\n\n\tcase \"gzip\":\n\t\treturn gunzippedBodyIfPossible(req.Body)\n\n\tcase \"deflate\", \"zlib\":\n\t\treturn zlibUncompressedbody(req.Body)\n\t}\n}", "func (m *BaseMethod) ResponseProcess(body io.ReadCloser, h http.Header, s StatusCode, retries uint) (*Response, error) {\n\tb, err := ioutil.ReadAll(body)\n\tif err != nil 
{\n\t\tlogrus.WithError(err).Error(ReadBodyError)\n\t\treturn nil, ReadBodyError\n\t}\n\terr = body.Close()\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(CloseBodyError)\n\t\treturn nil, CloseBodyError\n\t}\n\tvar headers []KVPair\n\tfor k, vs := range h {\n\t\tfor _, v := range vs {\n\t\t\theaders = append(headers, KVPair{k, v})\n\t\t}\n\t}\n\treturn &Response{Bytes: b, StatusCode: s, Headers: headers, CountRetry: retries}, nil\n}", "func decodeJsonBody(target interface{}, r *http.Request) error {\n\tbody, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.Body.Close(); err != nil {\n\t\treturn err\n\t}\n\tif err := json.Unmarshal(body, target); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func decodeJSONBody(w http.ResponseWriter, r *http.Request, dst interface{}) error {\n\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\n\t\treturn &jsonResp{http.StatusUnsupportedMediaType, \"Content-Type header is not application/json\"}\n\t}\n\n\t//Limit JSON body size to 1MB.\n\tr.Body = http.MaxBytesReader(w, r.Body, 1048576)\n\n\tdec := json.NewDecoder(r.Body)\n\tdec.DisallowUnknownFields()\n\n\terr := dec.Decode(&dst)\n\tif err != nil {\n\t\tvar (\n\t\t\tsyntaxError *json.SyntaxError\n\t\t\tunmarshalError *json.UnmarshalTypeError\n\t\t)\n\n\t\tswitch {\n\t\tcase errors.As(err, &syntaxError) || errors.Is(err, io.ErrUnexpectedEOF):\n\t\t\treturn &jsonResp{http.StatusBadRequest, \"Request body contains badly-formatted JSON.\"}\n\n\t\tcase errors.As(err, &unmarshalError):\n\t\t\tmsg := fmt.Sprintf(\"Request body contains an invalid value for the %q field (at position %d)\", unmarshalError.Field, unmarshalError.Offset)\n\t\t\treturn &jsonResp{http.StatusBadRequest, msg}\n\n\t\tcase strings.HasPrefix(err.Error(), \"json: unknown field \"):\n\t\t\tfieldName := strings.TrimPrefix(err.Error(), \"json: unknown field \")\n\t\t\tmsg := fmt.Sprintf(\"Request body contains unknown field %s\", fieldName)\n\n\t\t\treturn &jsonResp{http.StatusBadRequest, msg}\n\n\t\tcase errors.Is(err, io.EOF):\n\t\t\treturn &jsonResp{http.StatusBadRequest, \"Request body is empty\"}\n\n\t\tcase err.Error() == \"http: request body too large\":\n\t\t\treturn &jsonResp{http.StatusRequestEntityTooLarge, \"Request body shouldn't be larger than 1MB.\"}\n\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c *Client) readBody(resp *http.Response) ([]byte, error) {\n\tvar reader io.Reader = resp.Body\n\tswitch resp.Header.Get(\"Content-Encoding\") {\n\tcase \"\":\n\t\t// Do nothing\n\tcase \"gzip\":\n\t\treader = gzipDecompress(resp.Body)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s\", resp.Header.Get(\"Content-Encoding\"))\n\t}\n\treturn io.ReadAll(reader)\n}", "func CloseBody(res *http.Response) {\n\tif res == nil || res.Body == nil {\n\t\treturn\n\t}\n\t// Justification for 3 byte reads: two for up to \"\\r\\n\" after\n\t// a JSON/XML document, and then 1 to see EOF if we haven't yet.\n\t// TODO(bradfitz): detect Go 1.3+ and skip these reads.\n\t// See https://codereview.appspot.com/58240043\n\t// and https://codereview.appspot.com/49570044\n\tbuf := make([]byte, 1)\n\tfor i := 0; i < 3; i++ {\n\t\t_, err := res.Body.Read(buf)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tres.Body.Close()\n\n}", "func (br *Response) DiscardBody() error {\n\tbackendName := \"unknown\"\n\tif br.Backend != nil {\n\t\tbackendName = br.Backend.Name\n\t}\n\tif 
br.Response == nil || br.Response.Body == nil {\n\t\tlog.Debugf(\"ResponseBody for request %s is nil so cannot be closed - backend: %s\", br.ReqID(), backendName)\n\t\treturn nil\n\t}\n\t_, err := io.Copy(ioutil.Discard, br.Response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Discard body error %s\", err)\n\t}\n\n\terr = br.Response.Body.Close()\n\tlog.Debugf(\"ResponseBody for request %s closed with %s error - backend: %s\", br.ReqID(), err, backendName)\n\treturn err\n}", "func (b *Bulb) responseProcessor() {\n\tvar buff = make([]byte, 512)\n\tvar resp map[string]interface{}\n\n\tfor {\n\t\tn, err := b.conn.Read(buff)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tresponses := bytes.Split(buff[:n], []byte{CR, LF})\n\n\t\tfor _, r := range responses[:len(responses)-1] {\n\t\t\tresp = make(map[string]interface{})\n\n\t\t\terr = json.Unmarshal(r, &resp)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"OKResponse err: %s\\n\", r)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tswitch {\n\t\t\tcase keysExists(resp, \"id\", \"result\"): // Command success\n\t\t\t\tvar unmarshaled OKResponse\n\t\t\t\terr = json.Unmarshal(r, &unmarshaled)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"second unmarshal error: %s\\n\", r)\n\t\t\t\t}\n\t\t\t\tb.results[unmarshaled.id()] <- &unmarshaled\n\t\t\tcase keysExists(resp, \"id\", \"error\"): // Command failed\n\t\t\t\tvar unmarshaled ERRResponse\n\t\t\t\terr = json.Unmarshal(r, &unmarshaled)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"second unmarshal error: %s\\n\", r)\n\t\t\t\t}\n\t\t\t\tb.results[unmarshaled.id()] <- &unmarshaled\n\t\t\tcase keysExists(resp, \"method\", \"params\"): // Notification\n\t\t\t\t// log.Printf(\"state change%s\\n\", r)\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"unhandled response: %s\\n\", r)\n\t\t\t}\n\t\t}\n\t}\n\tlog.Printf(\"response processor exited\\n\")\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NewTreeFromState initiates a Tree from state data previously written by WriteState
func NewTreeFromState(data io.Reader) (*Tree, error) { idx := &Tree{ newBlocks: make(chan int), done: make(chan bool), blockMap: make(map[int]int), } if err := idx.loadState(data); err != nil { return nil, fmt.Errorf("Failed loading index state : %v", err) } go idx.blockAllocator() return idx, nil }
[ "func (db *DB) InitStateTree(depth int) error {\n\trootNode := core.NewStateRoot(depth)\n\treturn db.Instance.Create(&rootNode).Error\n}", "func newTree() *tree {\n\treturn &tree{Index: 0}\n}", "func InitAllTree(currentState state.State, quit chan struct{}) StateNode {\n\tnextState := state.CopyState(currentState)\n\ttree := InitNode(nextState)\n\ttree = AppendChildNodes(tree, 2, quit)\n\treturn tree\n}", "func newTreeNode(parent *treeNode, move Move, state GameState, ucbC float64) *treeNode {\n\t// Construct the new node.\n\tnode := treeNode{\n\t\tparent: parent,\n\t\tmove: move,\n\t\tstate: state,\n\t\ttotalOutcome: 0.0, // No outcome yet.\n\t\tvisits: 0, // No visits yet.\n\t\tuntriedMoves: state.AvailableMoves(), // Initially the node starts with every node unexplored.\n\t\tchildren: nil, // No children yet.\n\t\tucbC: ucbC, // Whole tree uses same constant.\n\t\tselectionScore: 0.0, // No value yet.\n\t\tplayer: state.PlayerJustMoved(),\n\t}\n\n\t// We're working with pointers.\n\treturn &node\n}", "func (n *State) Tree() *StateTree {\n\treturn &StateTree{State: n}\n}", "func createTree() *Tree {\n\treturn &Tree{root: nil}\n}", "func NewTree(name string, c *config.Config) *Tree {\n\treturn &Tree{config: c, name: name}\n}", "func NewTree() *BPTree {\n\treturn &BPTree{LastAddress: 0, keyPosMap: make(map[string]int64), enabledKeyPosMap: false}\n}", "func newTree(segmentSize, maxsize, depth int, hashfunc func() hash.Hash) *tree {\n\tn := newNode(0, nil, hashfunc())\n\tprevlevel := []*node{n}\n\t// iterate over levels and creates 2^(depth-level) nodes\n\t// the 0 level is on double segment sections so we start at depth - 2\n\tcount := 2\n\tfor level := depth - 2; level >= 0; level-- {\n\t\tnodes := make([]*node, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tparent := prevlevel[i/2]\n\t\t\tnodes[i] = newNode(i, parent, hashfunc())\n\t\t}\n\t\tprevlevel = nodes\n\t\tcount *= 2\n\t}\n\t// the datanode level is the nodes on the last level\n\treturn &tree{\n\t\tleaves: prevlevel,\n\t\tbuffer: make([]byte, maxsize),\n\t}\n}", "func (t *BPTree) startNewTree(key []byte, pointer *Record) error {\n\tt.root = t.newLeaf()\n\tt.root.Keys[0] = key\n\tt.root.pointers[0] = pointer\n\tt.root.KeysNum = 1\n\n\treturn nil\n}", "func newTree(segmentSize, depth int, hashfunc func() hash.Hash) *tree {\n\tn := newNode(0, nil, hashfunc())\n\tprevlevel := []*node{n}\n\t// iterate over levels and creates 2^(depth-level) nodes\n\t// the 0 level is on double segment sections so we start at depth - 2 since\n\tcount := 2\n\tfor level := depth - 2; level >= 0; level-- {\n\t\tnodes := make([]*node, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tparent := prevlevel[i/2]\n\t\t\tvar hasher hash.Hash\n\t\t\tif level == 0 {\n\t\t\t\thasher = hashfunc()\n\t\t\t}\n\t\t\tnodes[i] = newNode(i, parent, hasher)\n\t\t}\n\t\tprevlevel = nodes\n\t\tcount *= 2\n\t}\n\t// the datanode level is the nodes on the last level\n\treturn &tree{\n\t\tleaves: prevlevel,\n\t\tresult: make(chan []byte),\n\t\tsection: make([]byte, 2*segmentSize),\n\t}\n}", "func NewTree(name string, c *accord.Config) *Tree {\n\treturn &Tree{config: c, name: name}\n}", "func NewObjectTree(flags byte) *ObjectTree { return new(ObjectTree).Init(flags) }", "func newDepTree() *depTree {\n\treturn &depTree{\n\t\tdone: make(map[string]bool),\n\t\tch: make(chan struct{}),\n\t}\n}", "func NewState(t *testing.T, filename string) State {\n\tpuzzle := LoadTestPuzzle(t, filename)\n\n\tcells := make([][]string, puzzle.Rows)\n\tfor col := 0; col < puzzle.Rows; col++ 
{\n\t\tcells[col] = make([]string, puzzle.Cols)\n\t}\n\n\tnow := time.Now()\n\treturn State{\n\t\tStatus: model.StatusSelected,\n\t\tPuzzle: puzzle,\n\t\tCells: cells,\n\t\tCluesFilled: make(map[string]bool),\n\t\tLastStartTime: &now,\n\t}\n}", "func (d *decoder) createTree() *node {\n\tif val, _ := readBit(d.r); val {\n\t\treturn &node{readByte(d.r), -1, false, nil, nil}\n\t} else if d.numChars != d.numCharsDecoded {\n\t\tleft := d.createTree()\n\t\tright := d.createTree()\n\t\treturn &node{0, -1, true, left, right}\n\t}\n\n\treturn nil\n}", "func NewStateNode(path, hash string, nodeType uint64) *UserState {\n\tnewUserState := &UserState{\n\t\tAccountID: ZERO,\n\t\tPath: path,\n\t\tType: nodeType,\n\t}\n\tnewUserState.UpdatePath(newUserState.Path)\n\tnewUserState.Hash = hash\n\treturn newUserState\n}", "func newMerkleTree(height, n uint32) merkleTree {\n\treturn merkleTreeFromBuf(make([]byte, ((1<<height)-1)*n), height, n)\n}", "func NewTree(repo *Repository, id SHA1) *Tree {\n\treturn &Tree{\n\t\tID: id,\n\t\trepo: repo,\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Len returns the current number of items in the tree. It needs to query all allocators for their counters, so it will block if an allocator is constantly reserved.
func (idx *Tree) Len() (count int) { idx.Stop() count = int(idx.liveObjects) for _, a := range idx.allocators { count += int(a.itemCounter) } idx.Start() return }
[ "func (t *Tree) Len() int { return t.Count }", "func (t *BinaryTree) Size() int { return t.count }", "func (t *Tree) Len() int {\n\treturn t.Count\n}", "func (p NodePools) Len() int { return len(p) }", "func (t *Tree) Len() int {\n\treturn (int(t.leaves-1)*BlockSize + int(t.lastBlockLen))\n}", "func (r *Root) Len() uint64 {\n\treturn r.count\n}", "func (n Nodes) Len() int", "func (buf *queueBuffer) Len() uint64 {\n\treturn buf.depth\n}", "func (yfast *YFastTrie) Len() uint64 {\n\treturn yfast.num\n}", "func (ri *rawItemList) len() int { return len(ri.cumSize) }", "func (r *ringBuffer) Len() int {\n\tbufInUse := (r.head - r.current) * r.size\n\treturn r.Buffer.Len() + bufInUse\n}", "func (c *Cache) Len() int {\n\tif c == nil {\n\t\treturn 0\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.lru.Len() + c.mfa.Len()\n}", "func (t *RbTree[K, V]) Size() int {\n\treturn t.size\n}", "func (table ObjectClassTable) AllocatedCount() int {\n\tcapacity := table.Capacity()\n\tif capacity == 0 {\n\t\treturn 0\n\t}\n\tindex := int(table[0].ObjectID)\n\tcount := 0\n\tfor (index != 0) && (count < capacity) {\n\t\tcount++\n\t\tindex = int(table[index].Next)\n\t}\n\treturn count\n}", "func (t *Treap) Len() int {\n return t.size\n}", "func treeSize(l int) int {\n\t// Account for the root node\n\ttotal := 1\n\tfor ; l > 1; l = (l + 1) / 2 {\n\t\ttotal = total + l\n\t}\n\treturn total\n}", "func (t *TrieNode) Len() int {\n\tt.mx.RLock()\n\tdefer t.mx.RUnlock()\n\treturn t.LenHelper()\n}", "func (obj *hashtree) Length() int {\n\tblockLeaves := obj.Pt.BlockLeaves()\n\treturn len(blockLeaves.Leaves())\n}", "func (h *Heap) Len() int { return len(*h) }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop withdraws all allocators to prevent any more writes or reads. It blocks until it has acquired all allocators. If already stopped, it returns silently.
func (idx *Tree) Stop() { if !atomic.CompareAndSwapInt32(&idx.stopped, 0, 1) { return } for i := 0; i < len(idx.allocators); i++ { _ = idx.allocatorQueue.get() } }
[ "func (fetchers Fetchers) Stop() {\n\tfor _, fetcher := range fetchers {\n\t\tfetcher.Stop()\n\t}\n}", "func (r *reducer) stop() {\n\tfor _, m := range r.mappers {\n\t\tm.stop()\n\t}\n\tsyncClose(r.done)\n}", "func exitAllocRunner(runners ...AllocRunner) {\n\tfor _, ar := range runners {\n\t\tterminalAlloc := ar.Alloc().Copy()\n\t\tterminalAlloc.DesiredStatus = structs.AllocDesiredStatusStop\n\t\tar.Update(terminalAlloc)\n\t}\n}", "func (it *messageIterator) stop() {\n\tit.cancel()\n\tit.mu.Lock()\n\tit.checkDrained()\n\tit.mu.Unlock()\n\tit.wg.Wait()\n}", "func (p *Pool) Stop() {\n\tp.runMutex.Lock()\n\tdefer p.runMutex.Unlock()\n\n\tif !p.running {\n\t\tlog.Trace(\"Not running, ignoring Stop() call\")\n\t\treturn\n\t}\n\n\tlog.Trace(\"Stopping all feedConn goroutines\")\n\tvar wg sync.WaitGroup\n\twg.Add(p.actualSize)\n\tfor i := 0; i < p.actualSize; i++ {\n\t\tp.stopCh <- &wg\n\t}\n\twg.Wait()\n\n\tp.running = false\n}", "func (c *ResourceSemaphore) stop() {\n\tcount := 0\n\n\tfor {\n\t\tselect {\n\t\tcase d := <-c.received: // wait for all resource released\n\t\t\tc.storage <- d\n\t\tcase <-c.storage:\n\t\t\tcount += 1\n\t\t\tif count == c.size {\n\t\t\t\tclose(c.endChan) // all resource released\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (p *Pacer) Stop() {\n\tclose(p.gate)\n}", "func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {\n\tstop := make(chan struct{})\n\twait := make(chan struct{})\n\n\t// get the runContext cancellation channel now, because releaseRun will\n\t// write to the runContext field.\n\tdone := c.runContext.Done()\n\n\tgo func() {\n\t\tdefer close(wait)\n\t\t// Wait for a stop or completion\n\t\tselect {\n\t\tcase <-done:\n\t\t\t// done means the context was canceled, so we need to try and stop\n\t\t\t// providers.\n\t\tcase <-stop:\n\t\t\t// our own stop channel was closed.\n\t\t\treturn\n\t\t}\n\n\t\t// If we're here, we're stopped, trigger the call.\n\n\t\t{\n\t\t\t// Copy the providers so that a misbehaved blocking Stop doesn't\n\t\t\t// completely hang Terraform.\n\t\t\twalker.providerLock.Lock()\n\t\t\tps := make([]ResourceProvider, 0, len(walker.providerCache))\n\t\t\tfor _, p := range walker.providerCache {\n\t\t\t\tps = append(ps, p)\n\t\t\t}\n\t\t\tdefer walker.providerLock.Unlock()\n\n\t\t\tfor _, p := range ps {\n\t\t\t\t// We ignore the error for now since there isn't any reasonable\n\t\t\t\t// action to take if there is an error here, since the stop is still\n\t\t\t\t// advisory: Terraform will exit once the graph node completes.\n\t\t\t\tp.Stop()\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\t// Call stop on all the provisioners\n\t\t\twalker.provisionerLock.Lock()\n\t\t\tps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))\n\t\t\tfor _, p := range walker.provisionerCache {\n\t\t\t\tps = append(ps, p)\n\t\t\t}\n\t\t\tdefer walker.provisionerLock.Unlock()\n\n\t\t\tfor _, p := range ps {\n\t\t\t\t// We ignore the error for now since there isn't any reasonable\n\t\t\t\t// action to take if there is an error here, since the stop is still\n\t\t\t\t// advisory: Terraform will exit once the graph node completes.\n\t\t\t\tp.Stop()\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn stop, wait\n}", "func (c *stoppableContext) stop() {\n\tc.stopOnce.Do(func() {\n\t\tclose(c.stopped)\n\t})\n\n\tc.stopWg.Wait()\n}", "func (m *mapper) stop() { syncClose(m.done) }", "func (p *hardwareProfiler) Stop() error {\n\tvar err error\n\tfor _, profiler := range p.profilers {\n\t\terr = multierr.Append(err, profiler.Stop())\n\t}\n\treturn 
err\n}", "func (b *Batcher) Stop() {\n\tif b.ch == nil {\n\t\tpanic(\"BUG: forgot calling Batcher.Start()?\")\n\t}\n\tclose(b.ch)\n\t<-b.doneCh\n\tb.ch = nil\n\tb.doneCh = nil\n}", "func (collection *Collection) Stop() {\n\tfor _, c := range collection.collectors {\n\t\tc.Stop()\n\t\tcollection.wg.Done()\n\t}\n}", "func (ps *rateLimiter) Stop() { close(ps.exit) }", "func (l *Limiter) Stop() {\n\tl.lock.Lock()\n\tl.isStopped = true\n\tl.lock.Unlock()\n}", "func (it *Iterator) Stop() {\n\tit.mu.Lock()\n\tdefer it.mu.Unlock()\n\n\tselect {\n\tcase <-it.closed:\n\t\t// Cleanup has already been performed.\n\t\treturn\n\tdefault:\n\t}\n\n\t// We close this channel before calling it.puller.Stop to ensure that we\n\t// reliably return Done from Next.\n\tclose(it.closed)\n\n\t// Stop the puller. Once this completes, no more messages will be added\n\t// to it.ka.\n\tit.puller.Stop()\n\n\t// Start acking messages as they arrive, ignoring ackTicker. This will\n\t// result in it.ka.Stop, below, returning as soon as possible.\n\tit.acker.FastMode()\n\n\t// This will block until\n\t// (a) it.ka.Ctx is done, or\n\t// (b) all messages have been removed from keepAlive.\n\t// (b) will happen once all outstanding messages have been either ACKed or NACKed.\n\tit.ka.Stop()\n\n\t// There are no more live messages, so kill off the acker.\n\tit.acker.Stop()\n\n\tit.kaTicker.Stop()\n\tit.ackTicker.Stop()\n}", "func (batcr *BlockChain) Stop() {\n\tif !atomicPtr.CompareAndSwapInt32(&batcr.running, 0, 1) {\n\t\treturn\n\t}\n\t// Unsubscribe all subscriptions registered from blockchain\n\tbatcr.scope.Close()\n\tclose(batcr.quit)\n\tatomicPtr.StoreInt32(&batcr.procInterrupt, 1)\n\n\tbatcr.wg.Wait()\n\tbgmlogs.Info(\"Blockchain manager stopped\")\n}", "func (cm *CertMan) Stop() {\n\tcm.watching <- false\n}", "func (l *Launcher) Stop() {\n\tl.stop <- struct{}{}\n\tstopper := startstop.NewParallelStopper()\n\tfor identifier, tailer := range l.tailers {\n\t\tstopper.Add(tailer)\n\t\tdelete(l.tailers, identifier)\n\t}\n\tstopper.Stop()\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start releases all allocators withdrawn through a previous call to Stop. If the index is not stopped, it returns silently.
func (idx *Tree) Start() { if !atomic.CompareAndSwapInt32(&idx.stopped, 1, 0) { return } for i := 0; i < len(idx.allocators); i++ { idx.allocatorQueue.put(i) } }
[ "func (idx *Tree) Stop() {\n\tif !atomic.CompareAndSwapInt32(&idx.stopped, 0, 1) {\n\t\treturn\n\t}\n\tfor i := 0; i < len(idx.allocators); i++ {\n\t\t_ = idx.allocatorQueue.get()\n\t}\n}", "func (d *dummyContractStakingIndexer) Start(ctx context.Context) error {\n\treturn nil\n}", "func (mi *MinerIndex) start() {\n\tdefer func() { mi.finished <- struct{}{} }()\n\n\tif err := mi.updateOnChainIndex(); err != nil {\n\t\tlog.Errorf(\"error on initial updating miner index: %s\", err)\n\t}\n\tmi.chMeta <- struct{}{}\n\tfor {\n\t\tselect {\n\t\tcase <-mi.ctx.Done():\n\t\t\tlog.Info(\"graceful shutdown of background miner index\")\n\t\t\treturn\n\t\tcase <-time.After(metadataRefreshInterval):\n\t\t\tselect {\n\t\t\tcase mi.chMeta <- struct{}{}:\n\t\t\tdefault:\n\t\t\t\tlog.Info(\"skipping meta index update since it's busy\")\n\t\t\t}\n\t\tcase <-time.After(util.AvgBlockTime):\n\t\t\tif err := mi.updateOnChainIndex(); err != nil {\n\t\t\t\tlog.Errorf(\"error when updating miner index: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func exitAllocRunner(runners ...AllocRunner) {\n\tfor _, ar := range runners {\n\t\tterminalAlloc := ar.Alloc().Copy()\n\t\tterminalAlloc.DesiredStatus = structs.AllocDesiredStatusStop\n\t\tar.Update(terminalAlloc)\n\t}\n}", "func (gc *GC) Start() *GC {\n\tgc.mu.Lock()\n\tdefer gc.mu.Unlock()\n\tif gc.ticker != nil {\n\t\treturn gc // already started\n\t}\n\tgc.ticker = time.NewTicker(gc.interval)\n\tgo func() {\n\t\tfor _ = range gc.ticker.C {\n\t\t\tgc.Collect() // ignore error\n\t\t}\n\t}()\n\treturn gc\n}", "func (d *dummyContractStakingIndexer) Stop(ctx context.Context) error {\n\treturn nil\n}", "func TestVolumeWatch_StartStop(t *testing.T) {\n\tci.Parallel(t)\n\n\tsrv := &MockStatefulRPCServer{}\n\tsrv.state = state.TestStateStore(t)\n\tindex := uint64(100)\n\twatcher := NewVolumesWatcher(testlog.HCLogger(t), srv, \"\")\n\twatcher.quiescentTimeout = 100 * time.Millisecond\n\n\twatcher.SetEnabled(true, srv.State(), \"\")\n\trequire.Equal(t, 0, len(watcher.watchers))\n\n\tplugin := mock.CSIPlugin()\n\tnode := testNode(plugin, srv.State())\n\talloc1 := mock.Alloc()\n\talloc1.ClientStatus = structs.AllocClientStatusRunning\n\talloc2 := mock.Alloc()\n\talloc2.Job = alloc1.Job\n\talloc2.ClientStatus = structs.AllocClientStatusRunning\n\tindex++\n\terr := srv.State().UpsertJob(structs.MsgTypeTestSetup, index, alloc1.Job)\n\trequire.NoError(t, err)\n\tindex++\n\terr = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1, alloc2})\n\trequire.NoError(t, err)\n\n\t// register a volume\n\tvol := testVolume(plugin, alloc1, node.ID)\n\tindex++\n\terr = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})\n\trequire.NoError(t, err)\n\n\t// assert we get a watcher; there are no claims so it should immediately stop\n\trequire.Eventually(t, func() bool {\n\t\twatcher.wlock.RLock()\n\t\tdefer watcher.wlock.RUnlock()\n\t\treturn 1 == len(watcher.watchers) &&\n\t\t\t!watcher.watchers[vol.ID+vol.Namespace].isRunning()\n\t}, time.Second*2, 10*time.Millisecond)\n\n\t// claim the volume for both allocs\n\tclaim := &structs.CSIVolumeClaim{\n\t\tAllocationID: alloc1.ID,\n\t\tNodeID: node.ID,\n\t\tMode: structs.CSIVolumeClaimRead,\n\t\tAccessMode: structs.CSIVolumeAccessModeMultiNodeReader,\n\t}\n\n\tindex++\n\terr = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim)\n\trequire.NoError(t, err)\n\tclaim.AllocationID = alloc2.ID\n\tindex++\n\terr = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, 
claim)\n\trequire.NoError(t, err)\n\n\t// reap the volume and assert nothing has happened\n\tclaim = &structs.CSIVolumeClaim{\n\t\tAllocationID: alloc1.ID,\n\t\tNodeID: node.ID,\n\t}\n\tindex++\n\terr = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim)\n\trequire.NoError(t, err)\n\n\tws := memdb.NewWatchSet()\n\tvol, _ = srv.State().CSIVolumeByID(ws, vol.Namespace, vol.ID)\n\trequire.Equal(t, 2, len(vol.ReadAllocs))\n\n\t// alloc becomes terminal\n\talloc1.ClientStatus = structs.AllocClientStatusComplete\n\tindex++\n\terr = srv.State().UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1})\n\trequire.NoError(t, err)\n\tindex++\n\tclaim.State = structs.CSIVolumeClaimStateReadyToFree\n\terr = srv.State().CSIVolumeClaim(index, vol.Namespace, vol.ID, claim)\n\trequire.NoError(t, err)\n\n\t// 1 claim has been released and watcher stops\n\trequire.Eventually(t, func() bool {\n\t\tws := memdb.NewWatchSet()\n\t\tvol, _ := srv.State().CSIVolumeByID(ws, vol.Namespace, vol.ID)\n\t\treturn len(vol.ReadAllocs) == 1 && len(vol.PastClaims) == 0\n\t}, time.Second*2, 10*time.Millisecond)\n\n\trequire.Eventually(t, func() bool {\n\t\twatcher.wlock.RLock()\n\t\tdefer watcher.wlock.RUnlock()\n\t\treturn !watcher.watchers[vol.ID+vol.Namespace].isRunning()\n\t}, time.Second*5, 10*time.Millisecond)\n}", "func stopWatchHeapOps() {\n\tquitChan <- true\n}", "func (t *memoryLimitTuner) Start() {\n\tt.finalizer = newFinalizer(t.tuning) // Start tuning\n}", "func (ta *CachedAllocator) Start() error {\n\tta.TChan.Init()\n\tta.wg.Add(1)\n\tgo ta.mainLoop()\n\treturn nil\n}", "func (a *SparseAutomaton) Start() sparseVector {\n\n\tvals := make([]int, a.max+1)\n\tfor i := 0; i < a.max+1; i++ {\n\t\tvals[i] = i\n\t}\n\n\treturn newSparseVector(vals)\n\n}", "func (i *Index) Freeze() {\n\ti.frozen = true\n}", "func (s *Index) start() {\n\tdefer close(s.finished)\n\tif err := s.updateIndex(); err != nil {\n\t\tlog.Errorf(\"error on first updating slashing history: %s\", err)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-s.ctx.Done():\n\t\t\tlog.Info(\"graceful shutdown of background slashing updater\")\n\t\t\treturn\n\t\tcase <-time.After(util.AvgBlockTime):\n\t\t\tif err := s.updateIndex(); err != nil {\n\t\t\t\tlog.Errorf(\"error when updating slashing history: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}", "func (i *wasmerInstance) freeAllocatedMemory() {\n\tfor addr, size := range i.allocatedMemory {\n\t\terr := i.deallocate(addr, size)\n\t\tif err != nil {\n\t\t\ti.log.Error(\n\t\t\t\t\"Unable to deallocate memory, potential memory leak\",\n\t\t\t\t\"addr\", addr,\n\t\t\t\t\"size\", size,\n\t\t\t\t\"error\", err,\n\t\t\t)\n\t\t}\n\n\t\ti.log.Debug(\n\t\t\t\"Deallocated module instance memory\",\n\t\t\t\"addr\", addr,\n\t\t\t\"size\", size,\n\t\t)\n\t}\n\n\t// clear the cache\n\ti.allocatedMemory = map[int32]int32{}\n}", "func (ns *EsIndexer) Stop() {\n\n}", "func (gc *garbageCollector) recycleUnusedIndexFiles() {\n\tlog.Info(\"start recycleUnusedIndexFiles\")\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tstartTs := time.Now()\n\tprefix := path.Join(gc.option.cli.RootPath(), common.SegmentIndexPath) + \"/\"\n\t// list dir first\n\tkeys, _, err := gc.option.cli.ListWithPrefix(ctx, prefix, false)\n\tif err != nil {\n\t\tlog.Warn(\"garbageCollector recycleUnusedIndexFiles list keys from chunk manager failed\", zap.Error(err))\n\t\treturn\n\t}\n\tlog.Info(\"recycleUnusedIndexFiles, finish list object\", zap.Duration(\"time spent\", time.Since(startTs)), 
zap.Int(\"build ids\", len(keys)))\n\tfor _, key := range keys {\n\t\tlog.Debug(\"indexFiles keys\", zap.String(\"key\", key))\n\t\tbuildID, err := parseBuildIDFromFilePath(key)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"garbageCollector recycleUnusedIndexFiles parseIndexFileKey\", zap.String(\"key\", key), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"garbageCollector will recycle index files\", zap.Int64(\"buildID\", buildID))\n\t\tcanRecycle, segIdx := gc.meta.CleanSegmentIndex(buildID)\n\t\tif !canRecycle {\n\t\t\t// Even if the index is marked as deleted, the index file will not be recycled, wait for the next gc,\n\t\t\t// and delete all index files about the buildID at one time.\n\t\t\tlog.Info(\"garbageCollector can not recycle index files\", zap.Int64(\"buildID\", buildID))\n\t\t\tcontinue\n\t\t}\n\t\tif segIdx == nil {\n\t\t\t// buildID no longer exists in meta, remove all index files\n\t\t\tlog.Info(\"garbageCollector recycleUnusedIndexFiles find meta has not exist, remove index files\",\n\t\t\t\tzap.Int64(\"buildID\", buildID))\n\t\t\terr = gc.option.cli.RemoveWithPrefix(ctx, key)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"garbageCollector recycleUnusedIndexFiles remove index files failed\",\n\t\t\t\t\tzap.Int64(\"buildID\", buildID), zap.String(\"prefix\", key), zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Info(\"garbageCollector recycleUnusedIndexFiles remove index files success\",\n\t\t\t\tzap.Int64(\"buildID\", buildID), zap.String(\"prefix\", key))\n\t\t\tcontinue\n\t\t}\n\t\tfilesMap := make(map[string]struct{})\n\t\tfor _, fileID := range segIdx.IndexFileKeys {\n\t\t\tfilepath := metautil.BuildSegmentIndexFilePath(gc.option.cli.RootPath(), segIdx.BuildID, segIdx.IndexVersion,\n\t\t\t\tsegIdx.PartitionID, segIdx.SegmentID, fileID)\n\t\t\tfilesMap[filepath] = struct{}{}\n\t\t}\n\t\tfiles, _, err := gc.option.cli.ListWithPrefix(ctx, key, true)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"garbageCollector recycleUnusedIndexFiles list files failed\",\n\t\t\t\tzap.Int64(\"buildID\", buildID), zap.String(\"prefix\", key), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\tlog.Info(\"recycle index files\", zap.Int64(\"buildID\", buildID), zap.Int(\"meta files num\", len(filesMap)),\n\t\t\tzap.Int(\"chunkManager files num\", len(files)))\n\t\tdeletedFilesNum := 0\n\t\tfor _, file := range files {\n\t\t\tif _, ok := filesMap[file]; !ok {\n\t\t\t\tif err = gc.option.cli.Remove(ctx, file); err != nil {\n\t\t\t\t\tlog.Warn(\"garbageCollector recycleUnusedIndexFiles remove file failed\",\n\t\t\t\t\t\tzap.Int64(\"buildID\", buildID), zap.String(\"file\", file), zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdeletedFilesNum++\n\t\t\t}\n\t\t}\n\t\tlog.Info(\"index files recycle success\", zap.Int64(\"buildID\", buildID),\n\t\t\tzap.Int(\"delete index files num\", deletedFilesNum))\n\t}\n}", "func Free(descr C.int) {\n\tStop(descr)\n\tUnregisterAlgorithm((AlgorithmDescr)(descr))\n}", "func (scan TransactionFinalizer) Start() {\n\tticker := time.NewTicker(scan.scanInterval)\n\tdefer ticker.Stop()\n\n\tscan.MarkReady()\n\n\tselect {\n\tcase <-scan.CanStart:\n\t\tbreak\n\tcase <-scan.Done():\n\t\tscan.MarkDone()\n\t\treturn\n\t}\n\n\tlog.Info().Msgf(\"Start transaction-finalizer check daemon, scan each %v\", scan.scanInterval)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-scan.Done():\n\t\t\t\tscan.MarkDone()\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tscan.metrics.TimeFinalizeTransactions(func() 
{\n\t\t\t\t\tscan.finalizeStaleTransactions()\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}()\n\n\tscan.WaitStop()\n\tlog.Info().Msg(\"Stop transaction-finalizer daemon\")\n}", "func (a *AllIterator) Release() {\n\tif a.curIter != nil {\n\t\ta.curIter.Release()\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
WriteState writes the state of a stopped index to the given writer. If the index is not stopped, the result is undefined.
func (idx *Tree) WriteState(out io.Writer) (n int, err error) { return idx.writeState(out) }
[ "func (c *Client) WriteState(m gopongmsg.State) error {\n\tm.LastInputIndex = c.GetLastInputIndex()\n\tmsg := gopongmsg.Server{\n\t\tMsg: &gopongmsg.Server_State{\n\t\t\tState: &m,\n\t\t},\n\t}\n\n\treturn c.Write(msg)\n}", "func (d *EtcdStateDriver) WriteState(key string, value core.State,\n\tmarshal func(interface{}) ([]byte, error)) error {\n\tencodedState, err := marshal(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.Write(key, encodedState)\n}", "func (w ResponseWriter) SetState(k string, v interface{}) {\n\tst := w.State()\n\tst[k] = v\n}", "func (x *Xosh) WriteState(b []byte) {\r\n\tif len(b) < XoshStateSize {\r\n\t\tpanic(\"ReadState: byte slice too short\")\r\n\t}\r\n\t// This expects a little endian cpu, eg. all amd64.\r\n\t*(*uint64)(unsafe.Pointer(&b[ 0])) = bits.ReverseBytes64(x.s0)\r\n\t*(*uint64)(unsafe.Pointer(&b[ 8])) = bits.ReverseBytes64(x.s1)\r\n\t*(*uint64)(unsafe.Pointer(&b[16])) = bits.ReverseBytes64(x.s2)\r\n\t*(*uint64)(unsafe.Pointer(&b[24])) = bits.ReverseBytes64(x.s3)\r\n}", "func (s LocalBackend) WriteState(st *State) error {\n\tlog.Debugf(\"Writing state to %s\\n\", s.Path)\n\tdata, err := json.MarshalIndent(st, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to Load State for Writing\")\n\t}\n\terr = ioutil.WriteFile(s.Path, data, 0644)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to write state to file\")\n\t}\n\treturn nil\n}", "func (s *Stdio) writeState(ctx context.Context) error {\n\tif s.state != nil {\n\t\tjs, err := json.MarshalIndent(&s.state, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = ioutil.WriteFile(s.StateOutputFilename, js, 0644); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (w *Writer) writeIndex() (int64, error) {\n\tw.written = true\n\n\tbuf := new(bytes.Buffer)\n\tst := sst.NewWriter(buf)\n\n\tw.spaceIds.Sort()\n\n\t// For each defined space, we index the space's\n\t// byte offset in the file and the length in bytes\n\t// of all data in the space.\n\tfor _, spaceId := range w.spaceIds {\n\t\tb := new(bytes.Buffer)\n\n\t\tbinary.WriteInt64(b, w.spaceOffsets[spaceId])\n\t\tbinary.WriteInt64(b, w.spaceLengths[spaceId])\n\n\t\tif err := st.Set([]byte(spaceId), b.Bytes()); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif err := st.Close(); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn buf.WriteTo(w.file)\n}", "func (r *templateRouter) writeState() error {\n\tdat, err := json.MarshalIndent(r.state, \"\", \" \")\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to marshal route table: %v\", err)\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(routeFile, dat, 0644)\n\tif err != nil {\n\t\tglog.Errorf(\"Failed to write route table: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *stateFile) WriteQueueState(queue queue.Queue) error {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tif len(queue.Items) == 0 {\n\t\treturn s.ClearQueueState()\n\t}\n\tos.MkdirAll(s.path, 0777)\n\tq, err := queue.MarshalText()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal queue: %v\", err)\n\t}\n\tqueueFile := filepath.Join(s.path, s.name)\n\treturn ioutil.WriteFile(queueFile, q, 0666)\n}", "func (s *stateFile) WriteCurrentState(item queue.Item) error {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tif item.Operation == \"\" {\n\t\treturn s.ClearCurrentState()\n\t}\n\tos.MkdirAll(s.path, 0777)\n\tcurrentFile := filepath.Join(s.path, s.name+\"-current\")\n\ttext, err := item.MarshalText()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(currentFile, text, 
0666)\n}", "func (r *Prng) WriteState(b []byte) {\n\tr.rng.WriteState(b)\n}", "func (x *Index) Write(w io.Writer) error", "func (losm LogOnlyStateManager) WriteState(state *sous.State, _ sous.User) error {\n\treportWriting(losm.log, time.Now(), state, nil)\n\treturn nil\n}", "func (ms *MaintenanceState) Write() error {\n\tdata, err := json.MarshalIndent(ms.state, \"\", \" \")\n\trtx.Must(err, \"Could not marshal MaintenanceState to a buffer. This should never happen.\")\n\n\terr = ioutil.WriteFile(ms.filename, data, 0664)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: Failed to write state to %s: %s\", ms.filename, err)\n\t\tmetrics.Error.WithLabelValues(\"writefile\", \"maintenancestate.Write\").Add(1)\n\t\treturn err\n\t}\n\n\tlog.Printf(\"INFO: Successfully wrote state to %s.\", ms.filename)\n\treturn nil\n}", "func (p *Platform) WriteState(w io.Writer) (*Platform, error) {\n\treturn p, terraform.WriteState(p.State, w)\n}", "func WriteHTTPState(l log.Logger, w io.Writer, n *Node) {\n\tstate, err := n.controller.GetState(context.Background())\n\tif err != nil {\n\t\tlevel.Error(l).Log(\"msg\", \"failed to get state\", \"err\", err)\n\t\treturn\n\t}\n\n\terr = pageTemplate.Execute(w, struct {\n\t\tNow time.Time\n\t\tState *api.State\n\t}{\n\t\tNow: time.Now(),\n\t\tState: state,\n\t})\n\tif err != nil {\n\t\tlevel.Error(l).Log(\"msg\", \"failed to execute template\", \"err\", err)\n\t}\n}", "func (w ResponseWriter) State() map[string]interface{} {\n\treturn w.state\n}", "func (q *T) updateWriterState(w *writer, overrideBusy bool, isActive bool, consumed int) {\n\tq.mutex.Lock()\n\tif isActive {\n\t\tif w.isActive == idle || w.isActive == busy && overrideBusy {\n\t\t\tq.active[w.priority] = append(q.active[w.priority], w)\n\t\t\tw.isActive = active\n\t\t\tw.deficit -= consumed\n\t\t\tif w.deficit < 0 {\n\t\t\t\tpanic(\"deficit is negative\")\n\t\t\t}\n\t\t\tq.cond.Signal()\n\t\t}\n\t} else {\n\t\tif w.isActive == active {\n\t\t\tpanic(\"Writer is active when it should not be\")\n\t\t}\n\t\tif overrideBusy {\n\t\t\tw.isActive = idle\n\t\t}\n\t\tw.deficit = 0\n\t}\n\tq.mutex.Unlock()\n}", "func (c *Conn) WriteServiceResState(v bool) error {\n\tb := byte(0)\n\tif v {\n\t\tb = 1\n\t}\n\n\t_, err := c.writeBuf.Write([]byte{b})\n\t// do not flush, since WriteServiceResState() is always called before a WriteMessage()\n\treturn err\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
allocateNode returns the new node and its data block, positioned at the start
func (idx *Tree) allocateNode(a *Allocator, count int, prefixLen int) (n uint64, data []uint64) { prefixSlots := (prefixLen + 7) >> 3 if prefixLen >= 255 { prefixSlots++ } count += prefixSlots n = a.newNode(count) block := int(n >> blockSlotsShift) offset := int(n & blockSlotsOffsetMask) data = idx.blocks[block].data[offset:] return }
[ "func (t *BTree) AllocateNode() *BTreeNode {\n\tx := BTreeNode{}\n\tfor i := 0; i < 2*t.t; i++ {\n\t\tx.children = append(x.children, t.nullNode)\n\t}\n\tfor i := 0; i < 2*t.t-1; i++ {\n\t\tx.keys = append(x.keys, -1)\n\t}\n\treturn &x\n}", "func (idx *Tree) allocateNodeWithPrefix(a *Allocator, count int, prefix []byte) (n uint64, data []uint64) {\n\tprefixLen := len(prefix)\n\tprefixSlots := (prefixLen + 7) >> 3\n\tif prefixLen >= 255 {\n\t\tprefixSlots++\n\t}\n\tcount += prefixSlots\n\tn = a.newNode(count)\n\tblock := int(n >> blockSlotsShift)\n\toffset := int(n & blockSlotsOffsetMask)\n\tdata = idx.blocks[block].data[offset:]\n\tif prefixLen > 0 {\n\t\tstorePrefix(data, prefix)\n\t\tdata = data[prefixSlots:]\n\t}\n\treturn\n}", "func (allc *Allocator) makeNode(truncatedSize uint32) uint32 {\n\t// Calculate the amount of actual memory required for this node.\n\t// Depending on the height of the node, size might be truncated.\n\tsize := defaultNodeSize - truncatedSize\n\t/*padding := size % cacheLineSize\n\tif padding < paddingLimit {\n\t\tsize += padding\n\t}*/\n\treturn allc.new(size)\n}", "func (b *nodeBuilder) createNode(rng Range, kind AstNodeKind, scope ScopePosition) NodePosition {\n\t// maybe we should handle here the capacity of the node arrays ?\n\tl := NodePosition(len(b.nodes))\n\tb.nodes = append(b.nodes, AstNode{Kind: kind, Range: rng, Scope: scope})\n\treturn l\n}", "func (t *Btree) newNode() *Node {\n\t*t.NodeCount++\n\tid := t.genrateID()\n\tnode := &Node{\n\t\tNodeRecordMetaData: NodeRecordMetaData{\n\t\t\tId: proto.Int64(id),\n\t\t\tIsDirt: proto.Int32(0),\n\t\t},\n\t}\n\tt.nodes[id] = node\n\treturn node\n}", "func (n *Network) createNode(send int) *Node {\n\tnode := Node{value: 0, influenceRecieved: 0, inputRecieved: 0, id: len(n.nodeList), receive: make([]*Connection, 0, 0), send: make([]Connection, 0, send)}\n\n\tif len(n.nodeList) >= cap(n.nodeList) {\n\t\tn.nodeList = append(n.nodeList, node)\n\t} else {\n\t\tn.nodeList = n.nodeList[0 : len(n.nodeList)+1]\n\t\tn.nodeList[len(n.nodeList)-1] = node\n\t}\n\n\treturn &n.nodeList[len(n.nodeList)-1]\n}", "func NewNode(host string, size int) Node {\n\treturn node{host: host, size: size}\n}", "func createNewNode(key int) *Node {\n\treturn &Node{key: key, value: 1, left: nil, right: nil}\n}", "func (t *MCTS) alloc() naughty {\n\tt.Lock()\n\tl := len(t.freelist)\n\tif l == 0 {\n\t\tN := Node{\n\t\t\ttree: ptrFromTree(t),\n\t\t\tid: naughty(len(t.nodes)),\n\n\t\t\tminPSARatioChildren: defaultMinPsaRatio,\n\t\t}\n\t\tt.nodes = append(t.nodes, N)\n\t\tt.children = append(t.children, make([]naughty, 0, t.M*t.N+1))\n\t\tt.childLock = append(t.childLock, sync.Mutex{})\n\t\tn := naughty(len(t.nodes) - 1)\n\t\tt.Unlock()\n\t\treturn n\n\t}\n\n\ti := t.freelist[l-1]\n\tt.freelist = t.freelist[:l-1]\n\tt.Unlock()\n\treturn naughty(i)\n}", "func createNode(parent *hostNode, entry modules.HostEntry) *hostNode {\n\treturn &hostNode{\n\t\tparent: parent,\n\t\tweight: entryWeight(entry),\n\t\tcount: 1,\n\n\t\ttaken: true,\n\t\thostEntry: entry,\n\t}\n}", "func createNewEmptyNode() Node {\n\tnextNewId--\n\treturn Node{\n\t\tId: nextNewId,\n\t\tVisible: true,\n\t\tTimestamp: time.Now().Format(\"2006-01-02T15:04:05Z\"),\n\t\tVersion: \"1\",\n\t}\n}", "func NewNode() *Node {\n var n Node\n\n n.children = make(PriorityQueue, 0)\n heap.Init(&n.children)\n\n return &n\n}", "func NodeNew(data uintptr) *Node {\n\tc_data := (C.gpointer)(data)\n\n\tretC := C.g_node_new(c_data)\n\tretGo := NodeNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", 
"func createNewNode(ctx context.Context, nodeName string, virtual bool, clientset kubernetes.Interface) (*corev1.Node, error) {\n\tresources := corev1.ResourceList{}\n\tresources[corev1.ResourceCPU] = *resource.NewScaledQuantity(5000, resource.Milli)\n\tresources[corev1.ResourceMemory] = *resource.NewScaledQuantity(5, resource.Mega)\n\tnode := &corev1.Node{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: nodeName,\n\t\t},\n\t}\n\tif virtual {\n\t\tnode.Labels = map[string]string{\n\t\t\tconsts.TypeLabel: consts.TypeNode,\n\t\t}\n\t}\n\tnode.Status = corev1.NodeStatus{\n\t\tCapacity: resources,\n\t\tAllocatable: resources,\n\t\tConditions: []corev1.NodeCondition{\n\t\t\t0: {\n\t\t\t\tType: corev1.NodeReady,\n\t\t\t\tStatus: corev1.ConditionTrue,\n\t\t\t},\n\t\t},\n\t}\n\tnode, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn node, nil\n}", "func (h *handler) createNode(keypath string, value string, ttl int) (int, error) {\n\t// Default the value to \"-\" if it is blank.\n\tif len(value) == 0 {\n\t\tvalue = \"-\"\n\t}\n\n\t// Create an incrementing id for the lock.\n\tresp, err := h.client.AddChild(keypath, value, uint64(ttl))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tindexpath := resp.Node.Key\n\tindex, err := strconv.Atoi(path.Base(indexpath))\n\treturn index, err\n}", "func (t *MCTS) alloc() Naughty {\n\tt.Lock()\n\tdefer t.Unlock()\n\tl := len(t.freelist)\n\tif l == 0 {\n\t\tN := Node{\n\t\t\tlock: sync.Mutex{},\n\t\t\ttree: ptrFromTree(t),\n\t\t\tid: Naughty(len(t.nodes)),\n\t\t\thasChildren: false,\n\t\t}\n\t\tt.nodes = append(t.nodes, N)\n\t\tt.children = append(t.children, make([]Naughty, 0, t.current.ActionSpace()))\n\t\tn := Naughty(len(t.nodes) - 1)\n\t\treturn n\n\t}\n\n\ti := t.freelist[l-1]\n\tt.freelist = t.freelist[:l-1]\n\treturn i\n}", "func (bpt *BplusTree) treeNodeInit(isLeaf bool, next common.Key, prev common.Key,\n\tinitLen int) *treeNode {\n\n\tnode := defaultAlloc()\n\tnode.Children = make([]treeNodeElem, initLen, bpt.context.maxDegree)\n\tnode.IsLeaf = isLeaf\n\tnode.NextKey = next\n\tnode.PrevKey = prev\n\t// Generate a new key for the node being added.\n\tnode.NodeKey = common.Generate(bpt.context.keyType, bpt.context.pfx)\n\treturn node\n}", "func (t *tree) newNode(p *Node) *Node {\n\tt.Index = t.Index + 1\n\treturn &Node{\n\t\ttree: t,\n\t\tParent: p,\n\t\tIndex: t.Index,\n\t}\n}", "func newNode() *node {\n\treturn &node{}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
allocateNodeWithPrefix returns the new node and its data block, positioned after the prefix
func (idx *Tree) allocateNodeWithPrefix(a *Allocator, count int, prefix []byte) (n uint64, data []uint64) { prefixLen := len(prefix) prefixSlots := (prefixLen + 7) >> 3 if prefixLen >= 255 { prefixSlots++ } count += prefixSlots n = a.newNode(count) block := int(n >> blockSlotsShift) offset := int(n & blockSlotsOffsetMask) data = idx.blocks[block].data[offset:] if prefixLen > 0 { storePrefix(data, prefix) data = data[prefixSlots:] } return }
[ "func (idx *Tree) allocateNode(a *Allocator, count int, prefixLen int) (n uint64, data []uint64) {\n\tprefixSlots := (prefixLen + 7) >> 3\n\tif prefixLen >= 255 {\n\t\tprefixSlots++\n\t}\n\tcount += prefixSlots\n\tn = a.newNode(count)\n\tblock := int(n >> blockSlotsShift)\n\toffset := int(n & blockSlotsOffsetMask)\n\tdata = idx.blocks[block].data[offset:]\n\treturn\n}", "func (n *nodeHeader) setPrefix(p []byte) {\n\tpLen, pBytes := n.prefixFields()\n\n\t// Write to the byte array and set the length field to the num bytes copied\n\t*pLen = uint16(copy(pBytes, p))\n}", "func withPrefix(node *Prefix) prefixOption {\n\treturn func(m *PrefixMutation) {\n\t\tm.oldValue = func(context.Context) (*Prefix, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (n *nodeHeader) prefix() []byte {\n\tpLen, pBytes := n.prefixFields()\n\n\tif *pLen <= maxPrefixLen {\n\t\t// We have the whole prefix from the node\n\t\treturn pBytes[0:*pLen]\n\t}\n\n\t// Prefix is too long for node, we have to go find it from the leaf\n\tminLeaf := n.minChild().leafNode()\n\treturn minLeaf.key[0:*pLen]\n}", "func newPrefixHelper(prefix []rune) *prefixHelper {\n\treturn &prefixHelper{append([]rune(nil), prefix...)}\n}", "func (t *Trie) getNodeAtPrefix(prefix string) (*Node, error) {\n\trunner := t.Root\n\n\tfor _, currChar := range prefix {\n\t\tif _, ok := runner.Children[currChar]; !ok {\n\t\t\treturn nil, errors.New(\"prefix not found\")\n\t\t}\n\t\trunner = runner.Children[currChar]\n\t}\n\n\treturn runner, nil\n}", "func (n Node) generatePrefix() string {\n\tindicator := \"├── \"\n\tif n.isLastElement() {\n\t\tindicator = \"└── \"\n\t}\n\n\treturn n.getPrefixes() + indicator\n}", "func (f *MemKv) newPrefixWatcher(ctx context.Context, prefix string, fromVersion string) (*watcher, error) {\n\tif !strings.HasSuffix(prefix, \"/\") {\n\t\tprefix += \"/\"\n\t}\n\treturn f.watch(ctx, prefix, fromVersion, true)\n}", "func makeNode(cidr netip.Prefix) *node {\n\tn := new(node)\n\tn.cidr = cidr.Masked() // always store the prefix in canonical form\n\tn.recalc() // init the augmented field with recalc\n\treturn n\n}", "func generateKeyPrefixData(prefix []byte) []byte {\n\treturn append(prefix, []byte(\":data\")...)\n}", "func resourceNetboxIpamPrefixCreate(d *schema.ResourceData, meta interface{}) error {\n\tnetboxClient := meta.(*ProviderNetboxClient).client\n\n\tprefix := d.Get(\"prefix\").(string)\n\tdescription := d.Get(\"description\").(string)\n\tvrfID := int64(d.Get(\"vrf_id\").(int))\n\tisPool := d.Get(\"is_pool\").(bool)\n\t//status := d.Get(\"status\").(string)\n\ttenantID := int64(d.Get(\"tenant_id\").(int))\n\n\tvar parm = ipam.NewIPAMPrefixesCreateParams().WithData(\n\t\t&models.PrefixCreateUpdate{\n\t\t\tPrefix: &prefix,\n\t\t\tDescription: description,\n\t\t\tIsPool: isPool,\n\t\t\tTags: []string{},\n\t\t\tVrf: vrfID,\n\t\t\tTenant: tenantID,\n\t\t},\n\t)\n\n\tlog.Debugf(\"Executing IPAMPrefixesCreate against Netbox: %v\", parm)\n\n\tout, err := netboxClient.IPAM.IPAMPrefixesCreate(parm, nil)\n\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to execute IPAMPrefixesCreate: %v\", err)\n\n\t\treturn err\n\t}\n\n\t// TODO Probably a better way to parse this ID\n\td.SetId(fmt.Sprintf(\"ipam/prefix/%d\", out.Payload.ID))\n\td.Set(\"prefix_id\", out.Payload.ID)\n\n\tlog.Debugf(\"Done Executing IPAMPrefixesCreate: %v\", out)\n\n\treturn nil\n}", "func newPrefix(targetPrefix []string) (prefixes []models.Prefix, err error) {\n\tprefixes = make([]models.Prefix, len(targetPrefix))\n\tfor i, cidr := range 
targetPrefix {\n\t\tprefix, err := models.NewPrefix(cidr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tprefixes[i] = prefix\n\t}\n\treturn\n}", "func (db *MemoryCache) NewIteratorWithPrefix(prefix []byte) Iterator {\n\tdb.lock.RLock()\n\tdefer db.lock.RUnlock()\n\n\tvar (\n\t\tpr = string(prefix)\n\t\tkeys = make([]string, 0, len(db.db))\n\t\tvalues = make([][]byte, 0, len(db.db))\n\t)\n\t// Collect the keys from the memory database corresponding to the given prefix\n\tfor key := range db.db {\n\t\tif strings.HasPrefix(key, pr) {\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t}\n\t// Sort the items and retrieve the associated values\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tvalues = append(values, db.db[key])\n\t}\n\treturn &iterator{\n\t\tkeys: keys,\n\t\tvalues: values,\n\t}\n}", "func newPrefixMutation(c config, op Op, opts ...prefixOption) *PrefixMutation {\n\tm := &PrefixMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypePrefix,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}", "func (recv *Node) Prepend(node *Node) *Node {\n\tc_node := (*C.GNode)(C.NULL)\n\tif node != nil {\n\t\tc_node = (*C.GNode)(node.ToC())\n\t}\n\n\tretC := C.g_node_prepend((*C.GNode)(recv.native), c_node)\n\tretGo := NodeNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}", "func NewPrefixer(prefix string) *Prefixer {\n\treturn &Prefixer{\n\t\tprefix: prefix,\n\t\tfirstLine: true,\n\t\tnewLine: false,\n\t\tescapeSeq: false,\n\t\tescapeBuf: \"\",\n\t}\n}", "func WithPrefix(p string) Opt {\n\treturn func(h *Backend) error {\n\t\th.prefix = p\n\n\t\tif h.prefix != \"\" {\n\t\t\th.prefixReady2Use = fmt.Sprintf(\"%s%c\", h.prefix, h.separator)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (b *NetworkBuilder) HostPrefix(value int) *NetworkBuilder {\n\tb.hostPrefix = value\n\tb.bitmap_ |= 1\n\treturn b\n}", "func Prefix(prefix []byte) *util.Range {\n\tif len(prefix) == 0 {\n\t\treturn nil\n\t}\n\n\tstart, limit := prefixKeyPrefix(prefix)\n\tif len(limit) == 0 {\n\t\treturn &util.Range{Start: start}\n\t}\n\treturn &util.Range{Start: start, Limit: limit}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetAllocator reserves an allocator used for bulk Lookup/Update/Delete operations.
func (idx *Tree) GetAllocator() *Allocator { return idx.allocators[idx.allocatorQueue.get()] }
[ "func GetRuntimePortAllocator() (*RuntimePortAllocator, error) {\n\tif rpa.pa == nil {\n\t\tif err := rpa.createAndRestorePortAllocator(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn rpa, nil\n}", "func NewAllocator(provider lCoreProvider) *Allocator {\n\treturn &Allocator{\n\t\tConfig: make(AllocConfig),\n\t\tprovider: provider,\n\t}\n}", "func NewAllocator(round uint64) Allocator {\n\ta := &allocator{round: round}\n\treturn a\n}", "func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator {\n\treturn &allocator{\n\t\tstore: store,\n\t\tdbID: dbID,\n\t\tisUnsigned: isUnsigned,\n\t\tstep: step,\n\t\tlastAllocTime: time.Now(),\n\t}\n}", "func NewAllocator(store kv.Storage, dbID int64) Allocator {\n\treturn &allocator{\n\t\tstore: store,\n\t\tdbID: dbID,\n\t}\n}", "func (a *ResourceAllocator) allocator() memory.Allocator {\n\tif a.Allocator == nil {\n\t\treturn DefaultAllocator\n\t}\n\treturn a.Allocator\n}", "func Allocator() pageframe.Allocator {\n\treturn &_buddyAllocator\n}", "func NewAllocator() *Allocator {\n\talloc := new(Allocator)\n\talloc.buffers = make([]sync.Pool, 17) // 1B -> 64K\n\tfor k := range alloc.buffers {\n\t\ti := k\n\t\talloc.buffers[k].New = func() interface{} {\n\t\t\treturn make([]byte, 1<<uint32(i))\n\t\t}\n\t}\n\treturn alloc\n}", "func (t *tableCommon) Allocator(ctx sessionctx.Context) autoid.Allocator {\n\ttrace_util_0.Count(_tables_00000, 322)\n\tif ctx != nil {\n\t\ttrace_util_0.Count(_tables_00000, 324)\n\t\tsessAlloc := ctx.GetSessionVars().IDAllocator\n\t\tif sessAlloc != nil {\n\t\t\ttrace_util_0.Count(_tables_00000, 325)\n\t\t\treturn sessAlloc\n\t\t}\n\t}\n\ttrace_util_0.Count(_tables_00000, 323)\n\treturn t.alloc\n}", "func NewAllocator(policy Policy) (*Allocator, error) {\n\terr := nvml.Init()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error initializing NVML: %v\", err)\n\t}\n\n\tdevices, err := NewDevices()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error enumerating GPU devices: %v\", err)\n\t}\n\n\tallocator := newAllocatorFrom(devices, policy)\n\n\truntime.SetFinalizer(allocator, func(allocator *Allocator) {\n\t\t// Explicitly ignore any errors from nvml.Shutdown().\n\t\t_ = nvml.Shutdown()\n\t})\n\n\treturn allocator, nil\n}", "func newAllocator() *allocator {\n\ta := new(allocator)\n\ta.base.Init()\n\treturn a\n}", "func (a *allocator) Alloc() virtualizers.Virtualizer {\n\treturn new(Virtualizer)\n}", "func dataSourceAddressAllocatorRead(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*APIClient)\n\tname := d.Get(\"name\").(string)\n\tnamespace := d.Get(\"namespace\").(string)\n\n\trspFmt := server.GetSpecForm\n\tmsgFQN := strings.Replace(ves_io_schema_addr_allocator.ObjectType, \"Object\", \"GetResponse\", 1)\n\tprotoMsgType := proto.MessageType(msgFQN).Elem()\n\tprotoMsg := reflect.New(protoMsgType).Interface().(proto.Message)\n\tcallRsp := &server.CallResponse{\n\t\tProtoMsg: protoMsg,\n\t}\n\topts := []vesapi.CallOpt{\n\t\tvesapi.WithResponseFormat(rspFmt),\n\t\tvesapi.WithOutCallResponse(callRsp),\n\t}\n\tresp, err := client.GetObject(context.Background(), ves_io_schema_addr_allocator.ObjectType, namespace, name, opts...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error finding Volterra Address Allocator %s: %s\", name, err)\n\t}\n\n\tspecResp := callRsp.ProtoMsg.(*ves_io_schema_addr_allocator.GetResponse)\n\tmode := ves_io_schema_addr_allocator.AllocatorMode_name[int32(specResp.Spec.Mode)]\n\n\td.Set(\"mode\", mode)\n\td.Set(\"allocation_map\", 
make(map[string]string))\n\t// TODO set allocation map with right values, once schema is updated\n\td.SetId(resp.GetObjSystemMetadata().GetUid())\n\treturn nil\n}", "func (alloc *inMemoryAllocator) GetType() AllocatorType {\n\treturn alloc.allocType\n}", "func GetAllocation(allocID string, cb GetInfoCallback) (err error) {\n\tif err = CheckConfig(); err != nil {\n\t\treturn\n\t}\n\tvar url = withParams(STORAGESC_GET_ALLOCATION, Params{\n\t\t\"allocation\": allocID,\n\t})\n\tgo GetInfoFromSharders(url, OpStorageSCGetAllocation, cb)\n\treturn\n}", "func (p *ResourcePool) Alloc(ctx context.Context, id string) (Alloc, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif alloc, ok := p.allocs[id]; ok {\n\t\treturn alloc, nil\n\t}\n\treturn nil, errors.E(\"alloc\", id, errors.NotExist)\n}", "func (t *cpuTreeNode) NewAllocator(options cpuTreeAllocatorOptions) *cpuTreeAllocator {\n\tta := &cpuTreeAllocator{\n\t\troot: t,\n\t\toptions: options,\n\t}\n\treturn ta\n}", "func (idx *Tree) ReleaseAllocator(a *Allocator) {\n\tidx.allocatorQueue.put(a.id)\n}", "func (m *Manager) GetAllocation(fiveTuple *FiveTuple) *Allocation {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\treturn m.allocations[fiveTuple.Fingerprint()]\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ReleaseAllocator returns an allocator previously reserved using GetAllocator
func (idx *Tree) ReleaseAllocator(a *Allocator) { idx.allocatorQueue.put(a.id) }
[ "func (allocator *Allocator) ReleaseAllocator() {\n\tC.zj_AllocatorRelease(allocator.A)\n}", "func (a *ResourceAllocator) Free(b []byte) {\n\tif a == nil {\n\t\tDefaultAllocator.Free(b)\n\t\treturn\n\t}\n\n\tsize := len(b)\n\n\t// Release the memory to the allocator first.\n\talloc := a.allocator()\n\talloc.Free(b)\n\n\t// Release the memory in our accounting.\n\tatomic.AddInt64(&a.bytesAllocated, int64(-size))\n}", "func (r *Allocator) Release(id uint32) error {\n\tok, offset := r.netIDRange.Contains(id)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn r.alloc.Release(int(offset))\n}", "func (stack *StackAllocator) release() {\n\tstack.alloc = 0\n}", "func (a *Allocator) Destroy() {\n\tC.vmaDestroyAllocator(a.cAlloc)\n}", "func NewAllocator(round uint64) Allocator {\n\ta := &allocator{round: round}\n\treturn a\n}", "func (a *Allocated) Release() (errs []kv.Error) {\n\n\terrs = []kv.Error{}\n\n\tif a == nil {\n\t\treturn []kv.Error{kv.NewError(\"unexpected nil supplied for the release of resources\").With(\"stack\", stack.Trace().TrimRuntime())}\n\t}\n\n\tfor _, gpuAlloc := range a.GPU {\n\t\tif e := ReturnGPU(gpuAlloc); e != nil {\n\t\t\terrs = append(errs, e)\n\t\t}\n\t}\n\n\tif a.CPU != nil {\n\t\ta.CPU.Release()\n\t}\n\n\tif a.Disk != nil {\n\t\tif err := a.Disk.Release(); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\terrs = append(errs, kv.NewError(\"disk block missing\").With(\"stack\", stack.Trace().TrimRuntime()))\n\t}\n\n\treturn errs\n}", "func (c *crdBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) {\n\t// For CiliumIdentity-based allocation, the reference counting is\n\t// handled via CiliumEndpoint. Any CiliumEndpoint referring to a\n\t// CiliumIdentity will keep the CiliumIdentity alive. 
No action is\n\t// needed to release the reference here.\n\treturn nil\n}", "func NewAllocator() *Allocator {\n\talloc := new(Allocator)\n\talloc.buffers = make([]sync.Pool, 17) // 1B -> 64K\n\tfor k := range alloc.buffers {\n\t\ti := k\n\t\talloc.buffers[k].New = func() interface{} {\n\t\t\treturn make([]byte, 1<<uint32(i))\n\t\t}\n\t}\n\treturn alloc\n}", "func (mm *MMapRWManager) Release() (err error) {\n\tmm.fdm.reduceUsing(mm.path)\n\treturn mm.m.Unmap()\n}", "func newAllocator() *allocator {\n\ta := new(allocator)\n\ta.base.Init()\n\treturn a\n}", "func (p *ResourcePool) Free(a Alloc) error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\treturn p.doFree(a)\n}", "func (idx *Tree) GetAllocator() *Allocator {\n\treturn idx.allocators[idx.allocatorQueue.get()]\n}", "func NewAllocator(policy Policy) (*Allocator, error) {\n\terr := nvml.Init()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error initializing NVML: %v\", err)\n\t}\n\n\tdevices, err := NewDevices()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error enumerating GPU devices: %v\", err)\n\t}\n\n\tallocator := newAllocatorFrom(devices, policy)\n\n\truntime.SetFinalizer(allocator, func(allocator *Allocator) {\n\t\t// Explicitly ignore any errors from nvml.Shutdown().\n\t\t_ = nvml.Shutdown()\n\t})\n\n\treturn allocator, nil\n}", "func (s *ControllerPool) Release(controllerName string, controller interface{}) {\n\ts.mu.RLock()\n\tpool, ok := s.poolMap[controllerName]\n\ts.mu.RUnlock()\n\tif !ok {\n\t\tpanic(\"unknown controller name\")\n\t}\n\tDiFree(controller)\n\tpool.Put(controller)\n\n}", "func NewAllocator(provider lCoreProvider) *Allocator {\n\treturn &Allocator{\n\t\tConfig: make(AllocConfig),\n\t\tprovider: provider,\n\t}\n}", "func GetRuntimePortAllocator() (*RuntimePortAllocator, error) {\n\tif rpa.pa == nil {\n\t\tif err := rpa.createAndRestorePortAllocator(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn rpa, nil\n}", "func (r *PortAllocator) Destroy() {\n\tr.alloc.Destroy()\n}", "func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator {\n\treturn &allocator{\n\t\tstore: store,\n\t\tdbID: dbID,\n\t\tisUnsigned: isUnsigned,\n\t\tstep: step,\n\t\tlastAllocTime: time.Now(),\n\t}\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PrepareUpdate reserves an allocator and uses it to prepare an update operation. See Allocator.PrepareUpdate for details
func (idx *Tree) PrepareUpdate(key []byte) (found bool, op *UpdateOperation) {
	id := idx.allocatorQueue.get()
	op = newUpdateOperation(idx, idx.allocators[id], true)
	return op.prepareUpdate(key), op
}
[ "func (t *Table) PrepareUpdate(tu fibdef.Update) (*UpdateCommand, error) {\n\tu := &UpdateCommand{}\n\tu.real.RealUpdate = tu.Real()\n\tu.virt.VirtUpdate = tu.Virt()\n\n\tu.allocSplit = u.real.prepare(t)\n\tu.allocated = make([]*Entry, u.allocSplit+u.virt.prepare(t))\n\tif e := t.allocBulk(u.allocated); e != nil {\n\t\treturn nil, e\n\t}\n\n\treturn u, nil\n}", "func AllocateUpdate() int {\n\tupdateIdGen++\n\treturn updateIdGen\n}", "func (s DirectorBindStatusStrategy) PrepareForUpdate(ctx request.Context, obj, old runtime.Object) {\n\ts.DefaultStatusStorageStrategy.PrepareForUpdate(ctx, obj, old)\n\tdNew := obj.(*bind.DirectorBind)\n\tlabels := dNew.GetObjectMeta().GetLabels()\n\tif labels == nil {\n\t\tlabels = make(map[string]string)\n\t\tdNew.GetObjectMeta().SetLabels(labels)\n\t}\n\tlabels[\"state\"] = dNew.Status.State\n}", "func PreparePodUpdate(nodeName string, local, remote *corev1.Pod) {\n\t// TODO(negz): Allow updating container images.\n\n\t// Run PrepareObjectMeta on a copy of the local pod to ensure we maintain\n\t// any AK-managed labels and annotations when we propagate the local pod's\n\t// labels and annotations to the remote pod.\n\tl := local.DeepCopy()\n\tPrepareObjectMeta(nodeName, l)\n\n\tremote.SetLabels(l.GetLabels())\n\tremote.SetAnnotations(l.GetAnnotations())\n}", "func (detailsStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {\n\tnewBuild := obj.(*buildapi.Build)\n\toldBuild := old.(*buildapi.Build)\n\n\t// ignore phase updates unless the caller is updating the build to\n\t// a completed phase.\n\tphase := oldBuild.Status.Phase\n\tstages := newBuild.Status.Stages\n\tif buildinternalhelpers.IsBuildComplete(newBuild) {\n\t\tphase = newBuild.Status.Phase\n\t}\n\trevision := newBuild.Spec.Revision\n\tmessage := newBuild.Status.Message\n\treason := newBuild.Status.Reason\n\toutputTo := newBuild.Status.Output.To\n\t*newBuild = *oldBuild\n\tnewBuild.Status.Phase = phase\n\tnewBuild.Status.Stages = stages\n\tnewBuild.Spec.Revision = revision\n\tnewBuild.Status.Reason = reason\n\tnewBuild.Status.Message = message\n\tnewBuild.Status.Output.To = outputTo\n}", "func (b *AllocUpdateBatcher) CreateUpdate(allocs map[string]*structs.DesiredTransition, eval *structs.Evaluation) *BatchFuture {\n\twrapper := &updateWrapper{\n\t\tallocs: allocs,\n\t\te: eval,\n\t\tf: make(chan *BatchFuture, 1),\n\t}\n\n\tb.workCh <- wrapper\n\treturn <-wrapper.f\n}", "func (s *ReleaseServer) prepareUpdate(req *services.UpdateReleaseRequest) (*release.Release, *release.Release, error) {\n\tif req.Chart == nil {\n\t\treturn nil, nil, errMissingChart\n\t}\n\n\t// finds the deployed release with the given name\n\tcurrentRelease, err := s.env.Releases.Deployed(req.Name)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// determine if values will be reused\n\tif err := s.reuseValues(req, currentRelease); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// finds the non-deleted release with the given name\n\tlastRelease, err := s.env.Releases.Last(req.Name)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Increment revision count. 
This is passed to templates, and also stored on\n\t// the release object.\n\trevision := lastRelease.Version + 1\n\n\tts := timeconv.Now()\n\toptions := chartutil.ReleaseOptions{\n\t\tName: req.Name,\n\t\tTime: ts,\n\t\tNamespace: currentRelease.Namespace,\n\t\tIsUpgrade: true,\n\t\tRevision: int(revision),\n\t}\n\n\tcaps, err := capabilities(s.clientset.Discovery())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvaluesToRender, err := chartutil.ToRenderValuesCaps(req.Chart, req.Values, options, caps)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\thooks, manifestDoc, notesTxt, err := s.renderResources(req.Chart, valuesToRender, caps.APIVersions)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Store an updated release.\n\tupdatedRelease := &release.Release{\n\t\tName: req.Name,\n\t\tNamespace: currentRelease.Namespace,\n\t\tChart: req.Chart,\n\t\tConfig: req.Values,\n\t\tInfo: &release.Info{\n\t\t\tFirstDeployed: currentRelease.Info.FirstDeployed,\n\t\t\tLastDeployed: ts,\n\t\t\tStatus: &release.Status{Code: release.Status_PENDING_UPGRADE},\n\t\t\tDescription: \"Preparing upgrade\", // This should be overwritten later.\n\t\t},\n\t\tVersion: revision,\n\t\tManifest: manifestDoc.String(),\n\t\tHooks: hooks,\n\t}\n\n\tif len(notesTxt) > 0 {\n\t\tupdatedRelease.Info.Status.Notes = notesTxt\n\t}\n\terr = validateManifest(s.env.KubeClient, currentRelease.Namespace, manifestDoc.Bytes())\n\treturn currentRelease, updatedRelease, err\n}", "func (accountStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) {\n}", "func (strategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {\n\t_ = obj.(*authorizationapi.RoleBindingRestriction)\n\t_ = old.(*authorizationapi.RoleBindingRestriction)\n}", "func (idx *Tree) PrepareDelete(key []byte) (found bool, op *DeleteOperation) {\n\tid := idx.allocatorQueue.get()\n\top = newDeleteOperation(idx, idx.allocators[id], true)\n\tif op.prepare(key) {\n\t\treturn true, op\n\t}\n\top.Abort()\n\treturn false, nil\n}", "func (persistentvolumeclaimStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {\n\tnewPVC := obj.(*api.PersistentVolumeClaim)\n\toldPVC := old.(*api.PersistentVolumeClaim)\n\tnewPVC.Spec = oldPVC.Spec\n\tpvcutil.DropDisabledFieldsFromStatus(newPVC, oldPVC)\n}", "func (client BaseClient) UpdateAddressPreparer(ctx context.Context, addressName string, resourceGroupName string, addressUpdateParameter AddressUpdateParameter, ifMatch string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"addressName\": autorest.Encode(\"path\", addressName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t}\n\n\tconst APIVersion = \"2020-12-01-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPatch(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses/{addressName}\", pathParameters),\n\t\tautorest.WithJSON(addressUpdateParameter),\n\t\tautorest.WithQueryParameters(queryParameters))\n\tif len(ifMatch) > 0 {\n\t\tpreparer = autorest.DecoratePreparer(preparer,\n\t\t\tautorest.WithHeader(\"If-Match\", autorest.String(ifMatch)))\n\t}\n\treturn 
preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func NewAllocUpdateBatcher(ctx context.Context, batchDuration time.Duration, raft DeploymentRaftEndpoints) *AllocUpdateBatcher {\n\tb := &AllocUpdateBatcher{\n\t\tbatch: batchDuration,\n\t\traft: raft,\n\t\tctx: ctx,\n\t\tworkCh: make(chan *updateWrapper, 10),\n\t}\n\n\tgo b.batcher()\n\treturn b\n}", "func (statefulSetStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {\n\tnewStatefulSet := obj.(*apps.StatefulSet)\n\toldStatefulSet := old.(*apps.StatefulSet)\n\t// status changes are not allowed to update spec\n\tnewStatefulSet.Spec = oldStatefulSet.Spec\n}", "func (client QuotaRequestClient) UpdatePreparer(ctx context.Context, subscriptionID string, providerID string, location string, resourceName string, createQuotaRequest CurrentQuotaLimitBase, ifMatch string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"location\": autorest.Encode(\"path\", location),\n\t\t\"providerId\": autorest.Encode(\"path\", providerID),\n\t\t\"resourceName\": autorest.Encode(\"path\", resourceName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", subscriptionID),\n\t}\n\n\tconst APIVersion = \"2019-07-19-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsPatch(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/providers/Microsoft.Capacity/resourceProviders/{providerId}/locations/{location}/serviceLimits/{resourceName}\", pathParameters),\n\t\tautorest.WithJSON(createQuotaRequest),\n\t\tautorest.WithQueryParameters(queryParameters),\n\t\tautorest.WithHeader(\"If-Match\", autorest.String(ifMatch)))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (m *Manager) updateAllocs(keep map[ids.BroadcastID]bool, new map[ids.BroadcastID]*bInfo) []*bInfo {\n\tm.l.Lock()\n\tdefer m.l.Unlock()\n\n\tvar ret []*bInfo\n\n\t// Make a pass through m.infos, deleting anything not in keep or new.\n\tfor _, info := range m.infos {\n\t\tif keep[info.bID] {\n\t\t\tcontinue\n\t\t}\n\t\tif new[info.bID] == nil {\n\t\t\tret = append(ret, info)\n\t\t\tdelete(m.infos, info.bID)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Make a pass through new, updating m.infos.\n\tfor id, info := range new {\n\t\tif m.infos[id] != nil {\n\t\t\tret = append(ret, m.infos[id])\n\t\t}\n\t\tm.infos[id] = info\n\t}\n\n\treturn ret\n}", "func (client CassandraResourcesClient) CreateUpdateCassandraTablePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string, createUpdateCassandraTableParameters CassandraTableCreateUpdateParameters) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"accountName\": autorest.Encode(\"path\", accountName),\n\t\t\"keyspaceName\": autorest.Encode(\"path\", keyspaceName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t\t\"tableName\": autorest.Encode(\"path\", tableName),\n\t}\n\n\tconst APIVersion = \"2021-10-15\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; 
charset=utf-8\"),\n\t\tautorest.AsPut(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}\", pathParameters),\n\t\tautorest.WithJSON(createUpdateCassandraTableParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (b *CompactableBuffer) Update(address *EntryAddress, data []byte) error {\n\taddress.LockForWrite()\n\tdefer address.UnlockWrite()\n\theader, err := b.ReadHeader(address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbeforeUpdataDataSize := header.dataSize\n\tafterUpdateDataSize := len(data) + VarIntSize(len(data))\n\tdataSizeDelta := afterUpdateDataSize - int(beforeUpdataDataSize)\n\n\tremainingSpace := int(header.entrySize) - reservedSize - afterUpdateDataSize\n\theader.dataSize = int64(afterUpdateDataSize)\n\tif remainingSpace <= 0 {\n\t\tatomic.AddInt64(&b.dataSize, int64(-beforeUpdataDataSize))\n\t\tatomic.AddInt64(&b.entrySize, int64(-header.entrySize))\n\t\treturn b.expand(address, data)\n\t}\n\n\tatomic.AddInt64(&b.dataSize, int64(dataSizeDelta))\n\tvar target = make([]byte, 0)\n\tAppendToBytes(data, &target)\n\tif len(target) > int(header.dataSize) {\n\t\treturn io.EOF\n\t}\n\twritableBuffer := b.writableBuffer()\n\t_, err = writableBuffer.Write(address.Position()+reservedSize, target...)\n\treturn err\n}", "func NewUpdate(zone string, class uint16) *Msg {\n\tu := new(Msg)\n\tu.MsgHdr.Response = false\n\tu.MsgHdr.Opcode = OpcodeUpdate\n\tu.Compress = false // Seems BIND9 at least cannot handle compressed update pkgs\n\tu.Question = make([]Question, 1)\n\tu.Question[0] = Question{zone, TypeSOA, class}\n\treturn u\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
PrepareDelete reserves an allocator and uses it to prepare a delete operation. See Allocator.PrepareDelete for details
func (idx *Tree) PrepareDelete(key []byte) (found bool, op *DeleteOperation) {
	id := idx.allocatorQueue.get()
	op = newDeleteOperation(idx, idx.allocators[id], true)
	if op.prepare(key) {
		return true, op
	}
	op.Abort()
	return false, nil
}
[ "func (client DevicesClient) DeletePreparer(ctx context.Context, deviceName string, resourceGroupName string, managerName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"deviceName\": deviceName,\n\t\t\"managerName\": managerName,\n\t\t\"resourceGroupName\": resourceGroupName,\n\t\t\"subscriptionId\": client.SubscriptionID,\n\t}\n\n\tconst APIVersion = \"2017-06-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorSimple/managers/{managerName}/devices/{deviceName}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (c CognitiveServicesAccountsClient) preparerForDeletedAccountsPurge(ctx context.Context, id DeletedAccountId) (*http.Request, error) {\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": defaultApiVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(c.baseUri),\n\t\tautorest.WithPath(id.ID()),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (vfs *VirtualFilesystem) PrepareDeleteDentry(mntns *MountNamespace, d *Dentry) error {\n\tif checkInvariants {\n\t\tif d.parent == nil {\n\t\t\tpanic(\"d is independent\")\n\t\t}\n\t\tif d.IsDisowned() {\n\t\t\tpanic(\"d is already disowned\")\n\t\t}\n\t}\n\tvfs.mountMu.Lock()\n\tif mntns.mountpoints[d] != 0 {\n\t\tvfs.mountMu.Unlock()\n\t\treturn syserror.EBUSY\n\t}\n\td.mu.Lock()\n\tvfs.mountMu.Unlock()\n\t// Return with d.mu locked to block attempts to mount over it; it will be\n\t// unlocked by AbortDeleteDentry or CommitDeleteDentry.\n\treturn nil\n}", "func (client *CassandraClustersClient) deallocateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *CassandraClustersClientBeginDeallocateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/cassandraClusters/{clusterName}/deallocate\"\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif clusterName == \"\" {\n\t\treturn nil, errors.New(\"parameter clusterName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{clusterName}\", url.PathEscape(clusterName))\n\treq, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2023-03-15-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, nil\n}", "func MakeDeleter(\n\tcodec keys.SQLCodec, tableDesc catalog.TableDescriptor, requestedCols 
[]descpb.ColumnDescriptor,\n) Deleter {\n\tindexes := tableDesc.DeletableNonPrimaryIndexes()\n\tindexDescs := make([]descpb.IndexDescriptor, len(indexes))\n\tfor i, index := range indexes {\n\t\tindexDescs[i] = *index.IndexDesc()\n\t}\n\n\tvar fetchCols []descpb.ColumnDescriptor\n\tvar fetchColIDtoRowIndex catalog.TableColMap\n\tif requestedCols != nil {\n\t\tfetchCols = requestedCols[:len(requestedCols):len(requestedCols)]\n\t\tfetchColIDtoRowIndex = ColIDtoRowIndexFromCols(fetchCols)\n\t} else {\n\t\tmaybeAddCol := func(colID descpb.ColumnID) error {\n\t\t\tif _, ok := fetchColIDtoRowIndex.Get(colID); !ok {\n\t\t\t\tcol, err := tableDesc.FindColumnWithID(colID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfetchColIDtoRowIndex.Set(col.GetID(), len(fetchCols))\n\t\t\t\tfetchCols = append(fetchCols, *col.ColumnDesc())\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tfor j := 0; j < tableDesc.GetPrimaryIndex().NumColumns(); j++ {\n\t\t\tcolID := tableDesc.GetPrimaryIndex().GetColumnID(j)\n\t\t\tif err := maybeAddCol(colID); err != nil {\n\t\t\t\treturn Deleter{}\n\t\t\t}\n\t\t}\n\t\tfor _, index := range indexes {\n\t\t\tfor j := 0; j < index.NumColumns(); j++ {\n\t\t\t\tcolID := index.GetColumnID(j)\n\t\t\t\tif err := maybeAddCol(colID); err != nil {\n\t\t\t\t\treturn Deleter{}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// The extra columns are needed to fix #14601.\n\t\t\tfor j := 0; j < index.NumExtraColumns(); j++ {\n\t\t\t\tcolID := index.GetExtraColumnID(j)\n\t\t\t\tif err := maybeAddCol(colID); err != nil {\n\t\t\t\t\treturn Deleter{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\trd := Deleter{\n\t\tHelper: newRowHelper(codec, tableDesc, indexDescs),\n\t\tFetchCols: fetchCols,\n\t\tFetchColIDtoRowIndex: fetchColIDtoRowIndex,\n\t}\n\n\treturn rd\n}", "func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"instanceId\": autorest.Encode(\"path\", instanceID),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t\t\"vmScaleSetName\": autorest.Encode(\"path\", VMScaleSetName),\n\t}\n\n\tconst APIVersion = \"2019-12-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsPost(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (m *MockInterface) PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range decorators {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"PrepareDeleteRequest\", varargs...)\n\tret0, _ := ret[0].(*http.Request)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (client GroupClient) DeleteSecretPreparer(accountName string, databaseName string, secretName string) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"accountName\": accountName,\n\t\t\"adlaCatalogDnsSuffix\": 
client.AdlaCatalogDNSSuffix,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"databaseName\": autorest.Encode(\"path\", databaseName),\n\t\t\"secretName\": autorest.Encode(\"path\", secretName),\n\t}\n\n\tconst APIVersion = \"2015-10-01-preview\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithCustomBaseURL(\"https://{accountName}.{adlaCatalogDnsSuffix}\", urlParameters),\n\t\tautorest.WithPathParameters(\"/catalog/usql/databases/{databaseName}/secrets/{secretName}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare(&http.Request{})\n}", "func (mr *MockInterfaceMockRecorder) PrepareDeleteRequest(ctx interface{}, decorators ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx}, decorators...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PrepareDeleteRequest\", reflect.TypeOf((*MockInterface)(nil).PrepareDeleteRequest), varargs...)\n}", "func prepareDeletecluster(params map[string]string, clusterName string, Region string) {\n\tif clusterName != \"\" {\n\t\tparams[\"clusterName\"] = clusterName\n\t}\n\tif Region != \"\" {\n\t\tparams[\"Region\"] = Region\n\t}\n\tparams[\"amztarget\"] = \"AmazonEC2ContainerServiceV20141113.DeleteCluster\"\n}", "func (oo *OmciCC) PrepareForGarbageCollection(ctx context.Context, aDeviceID string) {\n\tlogger.Debugw(ctx, \"prepare for garbage collection\", log.Fields{\"device-id\": aDeviceID})\n\too.pBaseDeviceHandler = nil\n\too.pOnuDeviceEntry = nil\n\too.pOnuAlarmManager = nil\n}", "func preparedeleteserviceparams(params map[string]string, deleteservice Deleteservice, Region string) {\n\tif Region != \"\" {\n\t\tparams[\"Region\"] = Region\n\t}\n\tparams[\"amztarget\"] = \"AmazonEC2ContainerServiceV20141113.DeleteService\"\n}", "func (client FileClient) DeletePreparer(ctx context.Context, tempRequestID string, xNCPLANG string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"tempRequestId\": autorest.Encode(\"path\", tempRequestID),\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/api/v1/files/{tempRequestId}\", pathParameters))\n\tif len(xNCPLANG) > 0 {\n\t\tpreparer = autorest.DecoratePreparer(preparer,\n\t\t\tautorest.WithHeader(\"X-NCP-LANG\", autorest.String(xNCPLANG)))\n\t}\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (client HTTPSuccessClient) Delete202Preparer(booleanValue *bool) (*http.Request, error) {\n preparer := autorest.CreatePreparer(\n autorest.AsJSON(),\n autorest.AsDelete(),\n autorest.WithBaseURL(client.BaseURI),\n autorest.WithPath(\"/http/success/202\"))\n if booleanValue != nil {\n preparer = autorest.DecoratePreparer(preparer,\n autorest.WithJSON(booleanValue))\n }\n return preparer.Prepare(&http.Request{})\n}", "func (c SqlDedicatedGatewayClient) preparerForServiceDelete(ctx context.Context, id ServiceId) (*http.Request, error) {\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": defaultApiVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application/json; charset=utf-8\"),\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(c.baseUri),\n\t\tautorest.WithPath(id.ID()),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func 
(client CassandraResourcesClient) DeleteCassandraTablePreparer(ctx context.Context, resourceGroupName string, accountName string, keyspaceName string, tableName string) (*http.Request, error) {\n\tpathParameters := map[string]interface{}{\n\t\t\"accountName\": autorest.Encode(\"path\", accountName),\n\t\t\"keyspaceName\": autorest.Encode(\"path\", keyspaceName),\n\t\t\"resourceGroupName\": autorest.Encode(\"path\", resourceGroupName),\n\t\t\"subscriptionId\": autorest.Encode(\"path\", client.SubscriptionID),\n\t\t\"tableName\": autorest.Encode(\"path\", tableName),\n\t}\n\n\tconst APIVersion = \"2021-10-15\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPathParameters(\"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (idx *Tree) PrepareUpdate(key []byte) (found bool, op *UpdateOperation) {\n\tid := idx.allocatorQueue.get()\n\top = newUpdateOperation(idx, idx.allocators[id], true)\n\treturn op.prepareUpdate(key), op\n}", "func (client ManagementClient) DeleteSecretPreparer(vaultBaseURL string, secretName string) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"vaultBaseUrl\": vaultBaseURL,\n\t}\n\n\tpathParameters := map[string]interface{}{\n\t\t\"secret-name\": autorest.Encode(\"path\", secretName),\n\t}\n\n\tconst APIVersion = \"2015-06-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsDelete(),\n\t\tautorest.WithCustomBaseURL(\"{vaultBaseUrl}\", urlParameters),\n\t\tautorest.WithPathParameters(\"/secrets/{secret-name}\", pathParameters),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare(&http.Request{})\n}", "func (oo *OnuDeviceEntry) PrepareForGarbageCollection(ctx context.Context, aDeviceID string) {\n\tlogger.Debugw(ctx, \"prepare for garbage collection\", log.Fields{\"device-id\": aDeviceID})\n\too.baseDeviceHandler = nil\n\too.pOnuTP = nil\n\tif oo.PDevOmciCC != nil {\n\t\too.PDevOmciCC.PrepareForGarbageCollection(ctx, aDeviceID)\n\t}\n\too.PDevOmciCC = nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MaxKey returns the maximum key having the given searchPrefix, or the maximum key in the whole index if searchPrefix is nil. Maximum means the last key in lexicographic order; for uint64 keys encoded in BigEndian it is also the largest number. If ok is false, no key with the given prefix exists (the index is empty when searchPrefix is nil). For example, if we store temperature readings under keys of the form "temp_TIMESTAMP", where TIMESTAMP is an 8-byte BigEndian nanosecond timestamp, MaxKey([]byte("temp_")) returns the most recent reading.
func (idx *Tree) MaxKey(searchPrefix []byte) (v uint64, ok bool) {
	raw, _ := idx.partialSearch(searchPrefix)
	if raw == 0 {
		return 0, false
	}
	if isLeaf(raw) {
		return getLeafValue(raw), true
	}
	// now find the max
searchLoop:
	for {
		_, node, count, prefixLen := explodeNode(raw)
		block := int(node >> blockSlotsShift)
		offset := int(node & blockSlotsOffsetMask)
		data := idx.blocks[block].data[offset:]
		var prefixSlots int
		if prefixLen > 0 {
			if prefixLen == 255 {
				prefixLen = int(data[0])
				prefixSlots = (prefixLen + 15) >> 3
			} else {
				prefixSlots = (prefixLen + 7) >> 3
			}
			data = data[prefixSlots:]
		}
		if count >= fullAllocFrom {
			// find max, iterate from top
			for k := 255; k >= 0; k-- {
				a := atomic.LoadUint64(&data[k])
				if a != 0 {
					if isLeaf(a) {
						return getLeafValue(a), true
					}
					raw = a
					continue searchLoop
				}
			}
			// BUG: this might happen if all children in the node has been deleted, since we currently don't shrink node-256. we should go back in the tree!
			return 0, false
		}
		// load the last child (since they are ordered)
		a := atomic.LoadUint64(&data[count-1])
		if isLeaf(a) {
			return getLeafValue(a), true
		}
		raw = a
	}
}
[ "func GetMaxIndexKey(shardID uint64, key []byte) []byte {\n\tkey = getKeySlice(key, idKeyLength)\n\treturn getIDKey(maxIndexSuffix, shardID, key)\n}", "func MaxKey() Val { return Val{t: bsontype.MaxKey} }", "func (b *storeBuilder) MaxKey() uint32 {\n\treturn b.maxKey\n}", "func MaxKey() uint64 {\n\treturn config.maxKey\n}", "func (_KeyBroadcastContract *KeyBroadcastContractCallerSession) GetBestKey(startBatchIndex uint64) ([]byte, error) {\n\treturn _KeyBroadcastContract.Contract.GetBestKey(&_KeyBroadcastContract.CallOpts, startBatchIndex)\n}", "func (c *RecordIndexedSet) Max(idxname, key string, compare IndexDesc) []string {\n\tcomp, err := c.data.GetHeader().NewKeyGenerator(compare.cols...)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"RecordIndexedSet.Max: %s\", err.Error()))\n\t}\n\tsubset := c.GetRecordsByIndexKey(idxname, key)\n\tif len(subset) == 0 {\n\t\treturn nil\n\t}\n\tif len(subset) == 1 {\n\t\treturn subset[0]\n\t}\n\tmax := 0\n\tfor j := 1; j < len(subset); j++ {\n\t\tif comp(subset[max]) < comp(subset[j]) {\n\t\t\tmax = j\n\t\t}\n\t}\n\treturn subset[max]\n}", "func Max(key []byte, nodes []*memberlist.Node) (max *memberlist.Node) {\n\tmaxValue := big.NewInt(0)\n\n\tCompute(key, nodes, func(node *memberlist.Node, bi *big.Int) {\n\t\tif bi.Cmp(maxValue) == 1 {\n\t\t\tmaxValue = bi\n\t\t\tmax = node\n\t\t}\n\t})\n\n\treturn max\n}", "func (_KeyBroadcastContract *KeyBroadcastContractCaller) GetBestKey(opts *bind.CallOpts, startBatchIndex uint64) ([]byte, error) {\n\tvar out []interface{}\n\terr := _KeyBroadcastContract.contract.Call(opts, &out, \"getBestKey\", startBatchIndex)\n\n\tif err != nil {\n\t\treturn *new([]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte)\n\n\treturn out0, err\n\n}", "func (ctx *DataContext) MaxVersionKey(tk TKey) (Key, error) {\n\tkey := append([]byte{dataKeyPrefix}, ctx.data.InstanceID().Bytes()...)\n\tkey = append(key, tk...)\n\tkey = append(key, dvid.VersionID(dvid.MaxVersionID).Bytes()...)\n\tkey = append(key, dvid.ClientID(dvid.MaxClientID).Bytes()...)\n\treturn append(key, 0xFF), nil\n}", "func (m *SectorMap) GetMaxKey() int64 {\n\tvar maxKey int64\n\tfor k := range m.ObjUtilizations {\n\t\tif k > maxKey {\n\t\t\tmaxKey = k\n\t\t}\n\t}\n\n\treturn maxKey\n}", "func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {\n\tvar last *leafNode\n\tsearch := k\n\tfor {\n\t\t// Look for a leaf node\n\t\tif n.isLeaf() {\n\t\t\tlast = n.leaf\n\t\t}\n\n\t\t// Check for key exhaution\n\t\tif len(search) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Look for an edge\n\t\t_, n = n.getEdge(search[0])\n\t\tif n == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Consume the search prefix\n\t\tif bytes.HasPrefix(search, n.prefix) {\n\t\t\tsearch = search[len(n.prefix):]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif last != nil {\n\t\treturn last.key, last.val, true\n\t}\n\treturn nil, nil, false\n}", "func (this *AllOne) GetMaxKey() string {\n if len(this.m) == 0{\n return \"\"\n }\n return this.g[this.max][1].k\n}", "func (llvrw *ValueReaderWriter) ReadMaxKey() error {\n\tllvrw.checkdepth()\n\tllvrw.Invoked = ReadMaxKey\n\tif llvrw.ErrAfter == llvrw.Invoked {\n\t\treturn llvrw.Err\n\t}\n\n\treturn nil\n}", "func (_KeyBroadcastContract *KeyBroadcastContractCallerSession) GetBestKeyHash(startBatchIndex uint64) ([32]byte, error) {\n\treturn _KeyBroadcastContract.Contract.GetBestKeyHash(&_KeyBroadcastContract.CallOpts, startBatchIndex)\n}", "func (st *RedBlackBST) Max() Key {\n\tif st.IsEmpty() {\n\t\tpanic(\"call Max on empty 
RedBlackbst\")\n\t}\n\treturn st.max(st.root).key\n}", "func (_KeyBroadcastContract *KeyBroadcastContractCaller) GetBestKeyHash(opts *bind.CallOpts, startBatchIndex uint64) ([32]byte, error) {\n\tvar out []interface{}\n\terr := _KeyBroadcastContract.contract.Call(opts, &out, \"getBestKeyHash\", startBatchIndex)\n\n\tif err != nil {\n\t\treturn *new([32]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)\n\n\treturn out0, err\n\n}", "func (t *T) Max() (k int32, d interface{}) {\n\treturn t.root.max().nilOrKeyAndData()\n}", "func (sa *SuffixArray) MaxValue() uint64 { return sa.ba.MaxValue() }", "func (llvrw *ValueReaderWriter) WriteMaxKey() error {\n\tllvrw.checkdepth()\n\tllvrw.Invoked = WriteMaxKey\n\tif llvrw.ErrAfter == llvrw.Invoked {\n\t\treturn llvrw.Err\n\t}\n\treturn nil\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
MinKey returns the minimum key having the given searchPrefix, or the minimum key in the whole index if searchPrefix is nil. Minimum means the first key in lexicographic order; for uint64 keys encoded in BigEndian it is also the smallest number. If ok is false, no key with the given prefix exists (the index is empty when searchPrefix is nil).
func (idx *Tree) MinKey(searchPrefix []byte) (v uint64, ok bool) {
	raw, _ := idx.partialSearch(searchPrefix)
	if raw == 0 {
		return 0, false
	}
	if isLeaf(raw) {
		return getLeafValue(raw), true
	}
	// now find the min
searchLoop:
	for {
		_, node, count, prefixLen := explodeNode(raw)
		block := int(node >> blockSlotsShift)
		offset := int(node & blockSlotsOffsetMask)
		data := idx.blocks[block].data[offset:]
		var prefixSlots int
		if prefixLen > 0 {
			if prefixLen == 255 {
				prefixLen = int(data[0])
				prefixSlots = (prefixLen + 15) >> 3
			} else {
				prefixSlots = (prefixLen + 7) >> 3
			}
			data = data[prefixSlots:]
		}
		if count >= fullAllocFrom {
			// find min, iterate from bottom
			for k := range data[:count] {
				a := atomic.LoadUint64(&data[k])
				if a != 0 {
					if isLeaf(a) {
						return getLeafValue(a), true
					}
					raw = a
					continue searchLoop
				}
			}
			// BUG: this might happen if all children in the node has been deleted, since we currently don't shrink node-256. we should go back in the tree!
			return 0, false
		}
		// load first child (since they are ordered)
		a := atomic.LoadUint64(&data[0])
		if isLeaf(a) {
			return getLeafValue(a), true
		}
		raw = a
	}
}
[ "func (mi *MetricIndex) FindLowestPrefix(path string) (iter iterator.Iterator, reg *regexp.Regexp, prefix string, err error) {\n\n\tsegs := strings.Split(path, \".\")\n\tpLen := len(segs)\n\n\t// find the longest chunk w/o a reg and that will be the level db prefix filter\n\tneedsRegex := needRegex(path)\n\n\tlongChunk := \"\"\n\tuseKey := path\n\tuseKeyLen := pLen - 1\n\tif needsRegex {\n\t\tfor _, pth := range segs {\n\t\t\tif needRegex(pth) {\n\t\t\t\tuseKey = longChunk\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif len(longChunk) > 0 {\n\t\t\t\tlongChunk += \".\"\n\t\t\t}\n\t\t\tlongChunk += pth\n\t\t}\n\t\treg, err = regifyKey(path)\n\t\tif err != nil {\n\t\t\treturn nil, nil, \"\", err\n\t\t}\n\n\t}\n\n\t// need to figure out the proper partition to use from the raw key\n\tuseDB := mi.whichPartition([]byte(path))\n\n\t// we simply troll the {len}:{prefix} world\n\tprefix = fmt.Sprintf(\"%d:%s\", useKeyLen, useKey)\n\n\treturn mi.FindIter(prefix, useDB), reg, prefix, err\n}", "func (b *storeBuilder) MinKey() uint32 {\n\treturn b.minKey\n}", "func MinKey() Val { return Val{t: bsontype.MinKey} }", "func (this *AllOne) GetMinKey() string {\n if len(this.m) == 0{\n return \"\"\n }\n return this.g[this.min][0].k\n}", "func (ctx *DataContext) MinVersionKey(tk TKey) (Key, error) {\n\tkey := append([]byte{dataKeyPrefix}, ctx.data.InstanceID().Bytes()...)\n\tkey = append(key, tk...)\n\tkey = append(key, dvid.VersionID(0).Bytes()...)\n\tkey = append(key, dvid.ClientID(0).Bytes()...)\n\treturn append(key, 0), nil\n}", "func (t *Trie) GetShortestPrefix(key string) interface{} {\n\treturn t.getShortestPrefix(key, false)\n}", "func (p *partitionImpl) FindFirstKey(key uint64) (int, error) {\n\tl := 0\n\tr := p.GetNumRows() - 1\n\tfor l <= r {\n\t\tm := (l + r) >> 1\n\t\tif key > p.keys[m] {\n\t\t\tl = m + 1\n\t\t} else if key < p.keys[m] {\n\t\t\tr = m - 1\n\t\t} else if l != m {\n\t\t\tr = m\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif l < len(p.keys) && key == p.keys[l] {\n\t\treturn l, nil\n\t}\n\treturn l, errors.MissingKeyError{}\n}", "func (t *PrefixStoreByteTrie) PrefixSearch(prefix []byte) *list.List {\n\tif len(prefix) > t.maxKeySizeInBytes {\n\t\treturn list.New()\n\t}\n\tsubTrie := t.get(prefix)\n\tif subTrie == nil {\n\t\treturn list.New()\n\t}\n\n\tentries := subTrie.list(prefix)\n\tif subTrie.isLast {\n\t\tentries.PushFront(prefix)\n\t}\n\treturn entries\n}", "func (st *RedBlackBST) Min() Key {\n\tif st.IsEmpty() {\n\t\tpanic(\"call Min on empty RedBlackbst\")\n\t}\n\treturn st.min(st.root).key\n}", "func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {\n\tvar last *leafNode\n\tsearch := k\n\tfor {\n\t\t// Look for a leaf node\n\t\tif n.isLeaf() {\n\t\t\tlast = n.leaf\n\t\t}\n\n\t\t// Check for key exhaution\n\t\tif len(search) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Look for an edge\n\t\t_, n = n.getEdge(search[0])\n\t\tif n == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Consume the search prefix\n\t\tif bytes.HasPrefix(search, n.prefix) {\n\t\t\tsearch = search[len(n.prefix):]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif last != nil {\n\t\treturn last.key, last.val, true\n\t}\n\treturn nil, nil, false\n}", "func FindKthMin(nums []int, k int) (int, error) {\n\tindex := k - 1\n\treturn kthNumber(nums, index)\n}", "func (t *BoundedTable) IndexPrefix() kv.Key {\n\treturn nil\n}", "func lowestMatch(op string) int {\n\tfor i, prec := range precs {\n\t\tfor _, op2 := range prec {\n\t\t\tif op == op2 {\n\t\t\t\treturn i\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}", "func (n *Node) Min() int {\n\tif n.Left 
== nil {\n\t\treturn n.Key\n\t}\n\treturn n.Left.Min()\n}", "func (t *T) Min() (k int32, d interface{}) {\n\treturn t.root.min().nilOrKeyAndData()\n}", "func (s *Store) FindPrefix(bucket, prefix []byte, next func(key, val []byte) bool) error {\n\treturn s.db.View(func(tx *bolt.Tx) error {\n\t\tc := tx.Bucket(bucket).Cursor()\n\t\tfor k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {\n\t\t\tif !next(k, v) {\n\t\t\t\treturn io.EOF\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}", "func (t *Trie) GetShortestPrefixRK(key string) interface{} {\n\treturn t.getShortestPrefix(key, true)\n}", "func (rb *Bitmap) Minimum() uint32 {\n\tif len(rb.highlowcontainer.containers) == 0 {\n\t\tpanic(\"Empty bitmap\")\n\t}\n\treturn uint32(rb.highlowcontainer.containers[0].minimum()) | (uint32(rb.highlowcontainer.keys[0]) << 16)\n}", "func FindKthMin(arr []int, k int) (int, bool) {\n\tif arr == nil || len(arr) < 1 || k < 0 || k >= len(arr) {\n\t\treturn 0, false\n\t}\n\tlo, hi := 0, len(arr)-1\n\tindex := increasingPartition(arr, lo, hi)\n\tfor index != k-1 {\n\t\tif index < k-1 {\n\t\t\tlo = index + 1\n\t\t} else {\n\t\t\thi = index - 1\n\t\t}\n\t\tindex = increasingPartition(arr, lo, hi)\n\t}\n\treturn arr[index], true\n}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }